Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- valley/lib/python3.10/site-packages/transformers/benchmark/__init__.py +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark.py +271 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py +114 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py +136 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py +165 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py +298 -0
- valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py +913 -0
- valley/lib/python3.10/site-packages/transformers/generation/flax_utils.py +1004 -0
- valley/lib/python3.10/site-packages/transformers/generation/logits_process.py +982 -0
- valley/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py +132 -0
- valley/lib/python3.10/site-packages/transformers/onnx/__main__.py +240 -0
- valley/lib/python3.10/site-packages/transformers/onnx/convert.py +494 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__init__.py +976 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_generation.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/video_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py +242 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/object_detection.py +178 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/question_answering.py +664 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py +436 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py +366 -0
- valley/lib/python3.10/site-packages/transformers/pipelines/text_generation.py +296 -0
valley/lib/python3.10/site-packages/transformers/benchmark/__init__.py
ADDED
|
File without changes
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark.cpython-310.pyc
ADDED
|
Binary file (7.42 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args.cpython-310.pyc
ADDED
|
Binary file (3.36 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_tf.cpython-310.pyc
ADDED
|
Binary file (3.93 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_args_utils.cpython-310.pyc
ADDED
|
Binary file (5.54 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_tf.cpython-310.pyc
ADDED
|
Binary file (9.45 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/__pycache__/benchmark_utils.cpython-310.pyc
ADDED
|
Binary file (30 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""
|
| 17 |
+
Benchmarking the library on inference and training in PyTorch.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import timeit
|
| 22 |
+
from typing import Callable, Optional
|
| 23 |
+
|
| 24 |
+
from ..configuration_utils import PretrainedConfig
|
| 25 |
+
from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
|
| 26 |
+
from ..utils import is_py3nvml_available, is_torch_available, logging
|
| 27 |
+
from .benchmark_utils import (
|
| 28 |
+
Benchmark,
|
| 29 |
+
Memory,
|
| 30 |
+
MemorySummary,
|
| 31 |
+
measure_peak_memory_cpu,
|
| 32 |
+
start_memory_tracing,
|
| 33 |
+
stop_memory_tracing,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
if is_torch_available():
|
| 38 |
+
import torch
|
| 39 |
+
|
| 40 |
+
from .benchmark_args import PyTorchBenchmarkArguments
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
if is_py3nvml_available():
|
| 44 |
+
import py3nvml.py3nvml as nvml
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
logger = logging.get_logger(__name__)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class PyTorchBenchmark(Benchmark):
|
| 51 |
+
args: PyTorchBenchmarkArguments
|
| 52 |
+
configs: PretrainedConfig
|
| 53 |
+
framework: str = "PyTorch"
|
| 54 |
+
|
| 55 |
+
@property
|
| 56 |
+
def framework_version(self):
|
| 57 |
+
return torch.__version__
|
| 58 |
+
|
| 59 |
+
def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
|
| 60 |
+
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
|
| 61 |
+
return self._measure_speed(_inference)
|
| 62 |
+
|
| 63 |
+
def _inference_memory(
|
| 64 |
+
self, model_name: str, batch_size: int, sequence_length: int
|
| 65 |
+
) -> [Memory, Optional[MemorySummary]]:
|
| 66 |
+
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
|
| 67 |
+
return self._measure_memory(_inference)
|
| 68 |
+
|
| 69 |
+
def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
|
| 70 |
+
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
|
| 71 |
+
return self._measure_speed(_train)
|
| 72 |
+
|
| 73 |
+
def _train_memory(
|
| 74 |
+
self, model_name: str, batch_size: int, sequence_length: int
|
| 75 |
+
) -> [Memory, Optional[MemorySummary]]:
|
| 76 |
+
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
|
| 77 |
+
return self._measure_memory(_train)
|
| 78 |
+
|
| 79 |
+
def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
|
| 80 |
+
config = self.config_dict[model_name]
|
| 81 |
+
|
| 82 |
+
if self.args.torchscript:
|
| 83 |
+
config.torchscript = True
|
| 84 |
+
|
| 85 |
+
has_model_class_in_config = (
|
| 86 |
+
hasattr(config, "architectures")
|
| 87 |
+
and isinstance(config.architectures, list)
|
| 88 |
+
and len(config.architectures) > 0
|
| 89 |
+
)
|
| 90 |
+
if not self.args.only_pretrain_model and has_model_class_in_config:
|
| 91 |
+
try:
|
| 92 |
+
model_class = config.architectures[0]
|
| 93 |
+
transformers_module = __import__("transformers", fromlist=[model_class])
|
| 94 |
+
model_cls = getattr(transformers_module, model_class)
|
| 95 |
+
model = model_cls(config)
|
| 96 |
+
except ImportError:
|
| 97 |
+
raise ImportError(
|
| 98 |
+
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
|
| 99 |
+
" set `--only_pretrain_model` or `args.only_pretrain_model=True`."
|
| 100 |
+
)
|
| 101 |
+
else:
|
| 102 |
+
model = MODEL_MAPPING[config.__class__](config)
|
| 103 |
+
|
| 104 |
+
model.eval()
|
| 105 |
+
model.to(self.args.device)
|
| 106 |
+
|
| 107 |
+
# encoder-decoder has vocab size saved differently
|
| 108 |
+
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
|
| 109 |
+
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
|
| 110 |
+
|
| 111 |
+
if self.args.fp16:
|
| 112 |
+
logger.info("Running training in Mixed Precision...")
|
| 113 |
+
if not self.args.is_gpu:
|
| 114 |
+
raise ValueError("Mixed precision is possible only for GPU.")
|
| 115 |
+
# amp seems to have memory leaks so that memory usage
|
| 116 |
+
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
|
| 117 |
+
model.half()
|
| 118 |
+
|
| 119 |
+
if self.args.torchscript:
|
| 120 |
+
with torch.no_grad():
|
| 121 |
+
inference_model = torch.jit.trace(model, input_ids)
|
| 122 |
+
else:
|
| 123 |
+
inference_model = model
|
| 124 |
+
|
| 125 |
+
def encoder_decoder_forward():
|
| 126 |
+
with torch.no_grad():
|
| 127 |
+
outputs = inference_model(input_ids, decoder_input_ids=input_ids)
|
| 128 |
+
return outputs
|
| 129 |
+
|
| 130 |
+
def encoder_forward():
|
| 131 |
+
with torch.no_grad():
|
| 132 |
+
outputs = inference_model(input_ids)
|
| 133 |
+
return outputs
|
| 134 |
+
|
| 135 |
+
_forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
|
| 136 |
+
return _forward
|
| 137 |
+
|
| 138 |
+
def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
|
| 139 |
+
config = self.config_dict[model_name]
|
| 140 |
+
|
| 141 |
+
has_model_class_in_config = (
|
| 142 |
+
hasattr(config, "architectures")
|
| 143 |
+
and isinstance(config.architectures, list)
|
| 144 |
+
and len(config.architectures) > 0
|
| 145 |
+
)
|
| 146 |
+
if not self.args.only_pretrain_model and has_model_class_in_config:
|
| 147 |
+
try:
|
| 148 |
+
model_class = config.architectures[0]
|
| 149 |
+
transformers_module = __import__("transformers", fromlist=[model_class])
|
| 150 |
+
model_cls = getattr(transformers_module, model_class)
|
| 151 |
+
model = model_cls(config)
|
| 152 |
+
except ImportError:
|
| 153 |
+
raise ImportError(
|
| 154 |
+
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
|
| 155 |
+
" set `--only_pretrain_model` or `args.only_pretrain_model=True`."
|
| 156 |
+
)
|
| 157 |
+
else:
|
| 158 |
+
model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
|
| 159 |
+
|
| 160 |
+
if self.args.torchscript:
|
| 161 |
+
raise NotImplementedError("Training for torchscript is currently not implemented")
|
| 162 |
+
else:
|
| 163 |
+
train_model = model
|
| 164 |
+
|
| 165 |
+
model.train()
|
| 166 |
+
model.to(self.args.device)
|
| 167 |
+
|
| 168 |
+
# encoder-decoder has vocab size saved differently
|
| 169 |
+
vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
|
| 170 |
+
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
|
| 171 |
+
|
| 172 |
+
if self.args.fp16:
|
| 173 |
+
logger.info("Running training in Mixed Precision...")
|
| 174 |
+
if not self.args.is_gpu:
|
| 175 |
+
raise ValueError("Mixed precision is possible only for GPU.")
|
| 176 |
+
|
| 177 |
+
# amp seems to have memory leaks so that memory usage
|
| 178 |
+
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
|
| 179 |
+
model.half()
|
| 180 |
+
|
| 181 |
+
def compute_loss_and_backprob_encoder():
|
| 182 |
+
loss = train_model(input_ids, labels=input_ids)[0]
|
| 183 |
+
loss.backward()
|
| 184 |
+
return loss
|
| 185 |
+
|
| 186 |
+
def compute_loss_and_backprob_encoder_decoder():
|
| 187 |
+
loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
|
| 188 |
+
loss.backward()
|
| 189 |
+
return loss
|
| 190 |
+
|
| 191 |
+
_train = (
|
| 192 |
+
compute_loss_and_backprob_encoder_decoder
|
| 193 |
+
if config.is_encoder_decoder
|
| 194 |
+
else compute_loss_and_backprob_encoder
|
| 195 |
+
)
|
| 196 |
+
return _train
|
| 197 |
+
|
| 198 |
+
def _measure_speed(self, func) -> float:
|
| 199 |
+
try:
|
| 200 |
+
if self.args.is_tpu or self.args.torchscript:
|
| 201 |
+
# run additional 10 times to stabilize compilation for tpu and torchscript
|
| 202 |
+
logger.info("Do inference on TPU or torchscript. Running model 5 times to stabilize compilation")
|
| 203 |
+
timeit.repeat(
|
| 204 |
+
func,
|
| 205 |
+
repeat=1,
|
| 206 |
+
number=5,
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
|
| 210 |
+
runtimes = timeit.repeat(
|
| 211 |
+
func,
|
| 212 |
+
repeat=self.args.repeat,
|
| 213 |
+
number=10,
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:
|
| 217 |
+
import torch_xla.debug.metrics as met
|
| 218 |
+
|
| 219 |
+
self.print_fn(met.metrics_report())
|
| 220 |
+
|
| 221 |
+
return min(runtimes) / 10.0
|
| 222 |
+
except RuntimeError as e:
|
| 223 |
+
self.print_fn(f"Doesn't fit on GPU. {e}")
|
| 224 |
+
return "N/A"
|
| 225 |
+
|
| 226 |
+
def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
|
| 227 |
+
try:
|
| 228 |
+
if self.args.trace_memory_line_by_line:
|
| 229 |
+
trace = start_memory_tracing("transformers")
|
| 230 |
+
|
| 231 |
+
if self.args.is_tpu:
|
| 232 |
+
# tpu
|
| 233 |
+
raise NotImplementedError(
|
| 234 |
+
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with"
|
| 235 |
+
" `--no-memory` or `args.memory=False`"
|
| 236 |
+
)
|
| 237 |
+
elif self.args.is_gpu:
|
| 238 |
+
if not is_py3nvml_available():
|
| 239 |
+
logger.warning(
|
| 240 |
+
"py3nvml not installed, we won't log GPU memory usage. "
|
| 241 |
+
"Install py3nvml (pip install py3nvml) to log information about GPU."
|
| 242 |
+
)
|
| 243 |
+
memory = "N/A"
|
| 244 |
+
else:
|
| 245 |
+
logger.info(
|
| 246 |
+
"Measuring total GPU usage on GPU device. Make sure to not have additional processes running"
|
| 247 |
+
" on the same GPU."
|
| 248 |
+
)
|
| 249 |
+
# init nvml
|
| 250 |
+
nvml.nvmlInit()
|
| 251 |
+
func()
|
| 252 |
+
handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
|
| 253 |
+
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
|
| 254 |
+
max_bytes_in_use = meminfo.used
|
| 255 |
+
memory = Memory(max_bytes_in_use)
|
| 256 |
+
# shutdown nvml
|
| 257 |
+
nvml.nvmlShutdown()
|
| 258 |
+
else:
|
| 259 |
+
# cpu
|
| 260 |
+
memory_bytes = measure_peak_memory_cpu(func)
|
| 261 |
+
memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
|
| 262 |
+
|
| 263 |
+
if self.args.trace_memory_line_by_line:
|
| 264 |
+
summary = stop_memory_tracing(trace)
|
| 265 |
+
else:
|
| 266 |
+
summary = None
|
| 267 |
+
|
| 268 |
+
return memory, summary
|
| 269 |
+
except RuntimeError as e:
|
| 270 |
+
self.print_fn(f"Doesn't fit on GPU. {e}")
|
| 271 |
+
return "N/A", None
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
|
| 21 |
+
from .benchmark_args_utils import BenchmarkArguments
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
if is_torch_available():
|
| 25 |
+
import torch
|
| 26 |
+
|
| 27 |
+
if is_torch_tpu_available(check_device=False):
|
| 28 |
+
import torch_xla.core.xla_model as xm
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
logger = logging.get_logger(__name__)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
|
| 35 |
+
class PyTorchBenchmarkArguments(BenchmarkArguments):
|
| 36 |
+
deprecated_args = [
|
| 37 |
+
"no_inference",
|
| 38 |
+
"no_cuda",
|
| 39 |
+
"no_tpu",
|
| 40 |
+
"no_speed",
|
| 41 |
+
"no_memory",
|
| 42 |
+
"no_env_print",
|
| 43 |
+
"no_multi_process",
|
| 44 |
+
]
|
| 45 |
+
|
| 46 |
+
def __init__(self, **kwargs):
|
| 47 |
+
"""
|
| 48 |
+
This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
|
| 49 |
+
deleted
|
| 50 |
+
"""
|
| 51 |
+
for deprecated_arg in self.deprecated_args:
|
| 52 |
+
if deprecated_arg in kwargs:
|
| 53 |
+
positive_arg = deprecated_arg[3:]
|
| 54 |
+
setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
|
| 55 |
+
logger.warning(
|
| 56 |
+
f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
|
| 57 |
+
f" {positive_arg}={kwargs[positive_arg]}"
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
self.torchscript = kwargs.pop("torchscript", self.torchscript)
|
| 61 |
+
self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
|
| 62 |
+
self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
|
| 63 |
+
super().__init__(**kwargs)
|
| 64 |
+
|
| 65 |
+
torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
|
| 66 |
+
torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
|
| 67 |
+
fp16_opt_level: str = field(
|
| 68 |
+
default="O1",
|
| 69 |
+
metadata={
|
| 70 |
+
"help": (
|
| 71 |
+
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
|
| 72 |
+
"See details at https://nvidia.github.io/apex/amp.html"
|
| 73 |
+
)
|
| 74 |
+
},
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
@cached_property
|
| 78 |
+
def _setup_devices(self) -> Tuple["torch.device", int]:
|
| 79 |
+
requires_backends(self, ["torch"])
|
| 80 |
+
logger.info("PyTorch: setting up devices")
|
| 81 |
+
if not self.cuda:
|
| 82 |
+
device = torch.device("cpu")
|
| 83 |
+
n_gpu = 0
|
| 84 |
+
elif is_torch_tpu_available():
|
| 85 |
+
device = xm.xla_device()
|
| 86 |
+
n_gpu = 0
|
| 87 |
+
else:
|
| 88 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 89 |
+
n_gpu = torch.cuda.device_count()
|
| 90 |
+
return device, n_gpu
|
| 91 |
+
|
| 92 |
+
@property
|
| 93 |
+
def is_tpu(self):
|
| 94 |
+
return is_torch_tpu_available() and self.tpu
|
| 95 |
+
|
| 96 |
+
@property
|
| 97 |
+
def device_idx(self) -> int:
|
| 98 |
+
requires_backends(self, ["torch"])
|
| 99 |
+
# TODO(PVP): currently only single GPU is supported
|
| 100 |
+
return torch.cuda.current_device()
|
| 101 |
+
|
| 102 |
+
@property
|
| 103 |
+
def device(self) -> "torch.device":
|
| 104 |
+
requires_backends(self, ["torch"])
|
| 105 |
+
return self._setup_devices[0]
|
| 106 |
+
|
| 107 |
+
@property
|
| 108 |
+
def n_gpu(self):
|
| 109 |
+
requires_backends(self, ["torch"])
|
| 110 |
+
return self._setup_devices[1]
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
def is_gpu(self):
|
| 114 |
+
return self.n_gpu > 0
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_tf.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
from dataclasses import dataclass, field
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
from ..utils import cached_property, is_tf_available, logging, requires_backends
|
| 21 |
+
from .benchmark_args_utils import BenchmarkArguments
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
if is_tf_available():
|
| 25 |
+
import tensorflow as tf
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
logger = logging.get_logger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass
|
| 32 |
+
class TensorFlowBenchmarkArguments(BenchmarkArguments):
|
| 33 |
+
deprecated_args = [
|
| 34 |
+
"no_inference",
|
| 35 |
+
"no_cuda",
|
| 36 |
+
"no_tpu",
|
| 37 |
+
"no_speed",
|
| 38 |
+
"no_memory",
|
| 39 |
+
"no_env_print",
|
| 40 |
+
"no_multi_process",
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
def __init__(self, **kwargs):
|
| 44 |
+
"""
|
| 45 |
+
This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
|
| 46 |
+
deleted
|
| 47 |
+
"""
|
| 48 |
+
for deprecated_arg in self.deprecated_args:
|
| 49 |
+
if deprecated_arg in kwargs:
|
| 50 |
+
positive_arg = deprecated_arg[3:]
|
| 51 |
+
kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
|
| 52 |
+
logger.warning(
|
| 53 |
+
f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
|
| 54 |
+
f" {positive_arg}={kwargs[positive_arg]}"
|
| 55 |
+
)
|
| 56 |
+
self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
|
| 57 |
+
self.device_idx = kwargs.pop("device_idx", self.device_idx)
|
| 58 |
+
self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
|
| 59 |
+
self.use_xla = kwargs.pop("use_xla", self.use_xla)
|
| 60 |
+
super().__init__(**kwargs)
|
| 61 |
+
|
| 62 |
+
tpu_name: str = field(
|
| 63 |
+
default=None,
|
| 64 |
+
metadata={"help": "Name of TPU"},
|
| 65 |
+
)
|
| 66 |
+
device_idx: int = field(
|
| 67 |
+
default=0,
|
| 68 |
+
metadata={"help": "CPU / GPU device index. Defaults to 0."},
|
| 69 |
+
)
|
| 70 |
+
eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
|
| 71 |
+
use_xla: bool = field(
|
| 72 |
+
default=False,
|
| 73 |
+
metadata={
|
| 74 |
+
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
|
| 75 |
+
},
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
@cached_property
|
| 79 |
+
def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
|
| 80 |
+
requires_backends(self, ["tf"])
|
| 81 |
+
tpu = None
|
| 82 |
+
if self.tpu:
|
| 83 |
+
try:
|
| 84 |
+
if self.tpu_name:
|
| 85 |
+
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
|
| 86 |
+
else:
|
| 87 |
+
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
|
| 88 |
+
except ValueError:
|
| 89 |
+
tpu = None
|
| 90 |
+
return tpu
|
| 91 |
+
|
| 92 |
+
@cached_property
|
| 93 |
+
def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
|
| 94 |
+
requires_backends(self, ["tf"])
|
| 95 |
+
if self.is_tpu:
|
| 96 |
+
tf.config.experimental_connect_to_cluster(self._setup_tpu)
|
| 97 |
+
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
|
| 98 |
+
|
| 99 |
+
strategy = tf.distribute.TPUStrategy(self._setup_tpu)
|
| 100 |
+
else:
|
| 101 |
+
# currently no multi gpu is allowed
|
| 102 |
+
if self.is_gpu:
|
| 103 |
+
# TODO: Currently only single GPU is supported
|
| 104 |
+
tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
|
| 105 |
+
strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
|
| 106 |
+
else:
|
| 107 |
+
tf.config.set_visible_devices([], "GPU") # disable GPU
|
| 108 |
+
strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
|
| 109 |
+
|
| 110 |
+
return strategy
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
def is_tpu(self) -> bool:
|
| 114 |
+
requires_backends(self, ["tf"])
|
| 115 |
+
return self._setup_tpu is not None
|
| 116 |
+
|
| 117 |
+
@property
|
| 118 |
+
def strategy(self) -> "tf.distribute.Strategy":
|
| 119 |
+
requires_backends(self, ["tf"])
|
| 120 |
+
return self._setup_strategy
|
| 121 |
+
|
| 122 |
+
@property
|
| 123 |
+
def gpu_list(self):
|
| 124 |
+
requires_backends(self, ["tf"])
|
| 125 |
+
return tf.config.list_physical_devices("GPU")
|
| 126 |
+
|
| 127 |
+
@property
|
| 128 |
+
def n_gpu(self) -> int:
|
| 129 |
+
requires_backends(self, ["tf"])
|
| 130 |
+
if self.cuda:
|
| 131 |
+
return len(self.gpu_list)
|
| 132 |
+
return 0
|
| 133 |
+
|
| 134 |
+
@property
|
| 135 |
+
def is_gpu(self) -> bool:
|
| 136 |
+
return self.n_gpu > 0
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_args_utils.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
import dataclasses
|
| 18 |
+
import json
|
| 19 |
+
import warnings
|
| 20 |
+
from dataclasses import dataclass, field
|
| 21 |
+
from time import time
|
| 22 |
+
from typing import List
|
| 23 |
+
|
| 24 |
+
from ..utils import logging
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = logging.get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def list_field(default=None, metadata=None):
    """Build a dataclass field whose default value is a list.

    `dataclasses` forbids mutable defaults directly, so the list is produced by a
    `default_factory`. The factory returns a *fresh copy* of `default` for every
    instance; previously every instance shared the same list object, so mutating
    one instance's field (e.g. `args.models.append(...)`) silently changed the
    default seen by all later instances.

    Args:
        default: The default list value, or `None`.
        metadata: Passed through to `dataclasses.field(metadata=...)` (e.g. the
            `help` text consumed by `HfArgumentParser`).
    """
    return field(default_factory=lambda: default if default is None else list(default), metadata=metadata)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class BenchmarkArguments:
    """
    BenchMarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    # Model identifiers to benchmark; empty means "benchmark the base version of all models".
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    # What to measure (inference/training, speed/memory) and where to run it.
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    # NOTE: the `round(time())` timestamps below are evaluated once, at class-definition
    # (import) time — every benchmark run within one Python process therefore shares the
    # same default output filenames.
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        # Emit the deprecation warning on every instantiation of this (or any derived) class.
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """
        Serializes this instance to a JSON string.
        """
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        # Fail loudly if the user forgot to supply any model identifier.
        assert len(self.models) > 0, (
            "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
            " bert-base-cased` or `args.models = ['bert-base-cased']."
        )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        # Multiprocessing is opt-out via `multi_process`, but is force-disabled on TPU
        # (the `is_tpu` property is provided by framework-specific subclasses).
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_tf.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""
|
| 17 |
+
Benchmarking the library on inference and training in PyTorch.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import random
|
| 22 |
+
import timeit
|
| 23 |
+
from functools import wraps
|
| 24 |
+
from typing import Callable, Optional
|
| 25 |
+
|
| 26 |
+
from ..configuration_utils import PretrainedConfig
|
| 27 |
+
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
|
| 28 |
+
from ..utils import is_py3nvml_available, is_tf_available, logging
|
| 29 |
+
from .benchmark_utils import (
|
| 30 |
+
Benchmark,
|
| 31 |
+
Memory,
|
| 32 |
+
MemorySummary,
|
| 33 |
+
measure_peak_memory_cpu,
|
| 34 |
+
start_memory_tracing,
|
| 35 |
+
stop_memory_tracing,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
if is_tf_available():
|
| 40 |
+
import tensorflow as tf
|
| 41 |
+
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
|
| 42 |
+
|
| 43 |
+
from .benchmark_args_tf import TensorFlowBenchmarkArguments
|
| 44 |
+
|
| 45 |
+
if is_py3nvml_available():
|
| 46 |
+
import py3nvml.py3nvml as nvml
|
| 47 |
+
|
| 48 |
+
logger = logging.get_logger(__name__)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped callable either eagerly or as a `tf.function`.

    Args:
        do_eager_mode: If `True`, return the function unchanged so it executes eagerly.
        use_xla: If `True`, compile the graph-mode wrapper with XLA
            (`experimental_compile=True`). Must be `False` when `do_eager_mode` is `True`.

    Returns:
        A decorator that wraps a zero-side-effect callable accordingly.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            # Plain pass-through: eager execution, no graph tracing.
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            # Traced into a TF graph (optionally XLA-compiled) on first call.
            return func(*args, **kwargs)

        if do_eager_mode is True:
            # XLA compilation requires graph mode, so the two flags are mutually exclusive.
            assert (
                use_xla is False
            ), "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    """Build a `(batch_size, sequence_length)` int32 tensor of uniformly random token ids in `[0, vocab_size)`.

    NOTE: the original return annotation was `["tf.Tensor"]` — a list literal, not a
    type — and has been corrected to the forward-reference string `"tf.Tensor"`.
    """
    # Unseeded RNG: each call produces different ids (fine for throughput benchmarking).
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class TensorFlowBenchmark(Benchmark):
    # Concrete TF implementation of the abstract `Benchmark` runner.
    # `args` and `configs` are populated by the `Benchmark` base class.
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Version string of the installed TensorFlow package."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure the time of one forward pass (seconds per call) for the given configuration."""
        # initialize GPU on separate process
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure the time of one forward+backward pass (seconds per call)."""
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a forward pass; returns `(memory, line_by_line_summary_or_None)`."""
        # initialize GPU on separate process
        if self.args.is_gpu:
            # Avoid TF's default grab-all-GPU-memory behavior so `nvidia-smi` numbers are meaningful.
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure peak memory of a forward+backward pass; returns `(memory, summary_or_None)`."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."

        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg forward-pass closure ready for timing."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            # Use the task-specific architecture named in the config if possible.
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            # Fall back to the bare base model for this config class.
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            # decoder_input_ids reuses the encoder ids; only throughput matters here, not output quality.
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate an LM-head model and return a zero-arg forward+backward closure."""
        config = self.config_dict[model_name]

        # Gradient computation below relies on graph mode (`tf.gradients`).
        assert (
            self.args.eager_mode is False
        ), "Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`."

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            # Training needs a language-modeling head to produce a loss.
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            # Labels == inputs: we only need *a* loss whose gradients exercise the full graph.
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        """Time `func` with `timeit.repeat`, returning the best per-call time in seconds.

        NOTE(review): when a `ResourceExhaustedError` is raised, the except clause has no
        return statement, so this method implicitly returns `None` instead of a float.
        """
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                # Each repeat ran `func` 10 times, hence the division.
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        """Run `func` once and report peak memory (via nvml on GPU, `measure_peak_memory_cpu` on CPU).

        Returns `(memory, summary)`, where `summary` is a line-by-line `MemorySummary`
        when `trace_memory_line_by_line` is enabled, else `None`. On GPU OOM returns
        `("N/A", None)`.
        """
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    # Line-by-line tracing hooks sys tracing, which only works eagerly.
                    assert self.args.eager_mode, (
                        "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                        " consumption line by line."
                    )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        # Device-wide used memory *after* the run, as `nvidia-smi` would report it.
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        # CPU + tracing case: use the traced total as the memory figure.
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
|
valley/lib/python3.10/site-packages/transformers/benchmark/benchmark_utils.py
ADDED
|
@@ -0,0 +1,913 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
|
| 2 |
+
|
| 3 |
+
# Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""
|
| 17 |
+
Utilities for working with the local dataset cache.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import copy
|
| 21 |
+
import csv
|
| 22 |
+
import linecache
|
| 23 |
+
import os
|
| 24 |
+
import platform
|
| 25 |
+
import sys
|
| 26 |
+
import warnings
|
| 27 |
+
from abc import ABC, abstractmethod
|
| 28 |
+
from collections import defaultdict, namedtuple
|
| 29 |
+
from datetime import datetime
|
| 30 |
+
from multiprocessing import Pipe, Process, Queue
|
| 31 |
+
from multiprocessing.connection import Connection
|
| 32 |
+
from typing import Callable, Iterable, List, NamedTuple, Optional, Union
|
| 33 |
+
|
| 34 |
+
from .. import AutoConfig, PretrainedConfig
|
| 35 |
+
from .. import __version__ as version
|
| 36 |
+
from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging
|
| 37 |
+
from .benchmark_args_utils import BenchmarkArguments
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
if is_torch_available():
|
| 41 |
+
from torch.cuda import empty_cache as torch_empty_cache
|
| 42 |
+
|
| 43 |
+
if is_tf_available():
|
| 44 |
+
from tensorflow.python.eager import context as tf_context
|
| 45 |
+
|
| 46 |
+
if is_psutil_available():
|
| 47 |
+
import psutil
|
| 48 |
+
|
| 49 |
+
if is_py3nvml_available():
|
| 50 |
+
import py3nvml.py3nvml as nvml
|
| 51 |
+
|
| 52 |
+
if platform.system() == "Windows":
|
| 53 |
+
from signal import CTRL_C_EVENT as SIGKILL
|
| 54 |
+
else:
|
| 55 |
+
from signal import SIGKILL
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
_is_memory_tracing_enabled = False
|
| 62 |
+
|
| 63 |
+
BenchmarkOutput = namedtuple(
|
| 64 |
+
"BenchmarkOutput",
|
| 65 |
+
[
|
| 66 |
+
"time_inference_result",
|
| 67 |
+
"memory_inference_result",
|
| 68 |
+
"time_train_result",
|
| 69 |
+
"memory_train_result",
|
| 70 |
+
"inference_summary",
|
| 71 |
+
"train_summary",
|
| 72 |
+
],
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
    """
    Wrap `func` so that it executes inside its own child process.

    Isolating the measured function in a fresh process is what keeps memory
    measurements accurate: the parent interpreter's allocations never pollute
    the measurement.

    Args:
        - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
        - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
    """
    # No isolation requested: hand the callable back untouched.
    if not do_multi_processing:
        return func

    def run_isolated(*args, **kwargs):
        # Child-side target: execute `func` and ship its result back through the queue.
        def child_target(result_queue: Queue, *inner_args):
            try:
                outcome = func(*inner_args)
            except Exception as e:
                logger.error(e)
                print(e)
                outcome = "N/A"
            result_queue.put(outcome)

        result_queue = Queue()
        worker = Process(target=child_target, args=[result_queue] + list(args))
        worker.start()
        # Read the result before joining so a full queue cannot deadlock the child.
        outcome = result_queue.get()
        worker.join()
        return outcome

    logger.info(f"Function {func} is executed in its own process...")
    return run_isolated
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def is_memory_tracing_enabled():
    """Return whether line-by-line memory tracing is currently active."""
    # Reading a module-level name requires no `global` declaration.
    return _is_memory_tracing_enabled
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class Frame(NamedTuple):
    """
    `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields:

    - 'filename' (string): Name of the file currently executed
    - 'module' (string): Name of the module currently executed
    - 'line_number' (int): Number of the line currently executed
    - 'event' (string): Event that triggered the tracing (default will be "line")
    - 'line_text' (string): Text of the line in the python script
    """

    filename: str
    module: str
    line_number: int
    event: str
    line_text: str
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class UsedMemoryState(NamedTuple):
    """
    `UsedMemoryState` are named tuples with the following fields:

    - 'frame': a `Frame` namedtuple (see above) storing information on the current tracing frame (current file,
      location in current file)
    - 'cpu_memory': CPU RSS memory state *before* executing the line, in bytes
    - 'gpu_memory': GPU used memory *before* executing the line, in bytes (sum for all GPUs or for only
      `gpus_to_trace` if provided)
    """

    frame: Frame
    cpu_memory: int
    gpu_memory: int
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class Memory(NamedTuple):
    """
    `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes
    by calling `__repr__`

    - `bytes` (integer): number of bytes
    """

    bytes: int

    def __repr__(self) -> str:
        # Human-readable representation: integer number of megabytes.
        return str(bytes_to_mega_bytes(self.bytes))
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class MemoryState(NamedTuple):
    """
    `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:

    - `frame` (`Frame`): the current frame (see above)
    - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
    - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
    - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
    """

    frame: Frame
    cpu: Memory
    gpu: Memory
    cpu_gpu: Memory
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class MemorySummary(NamedTuple):
    """
    `MemorySummary` namedtuple with the fields:

    - `sequential`: a list of `MemoryState` namedtuple (see above) computed from the provided `memory_trace` by
      subtracting the memory after executing each line from the memory before executing said line.
    - `cumulative`: a list of `MemoryState` namedtuple (see above) with cumulative increase in memory for each line
      obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted
      from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory
      is released)
    - `current`: a list of `MemoryState` namedtuple (see above) holding the *absolute* memory at each step, sorted
      from the largest CPU+GPU footprint to the smallest.
    - `total`: total memory increase during the full tracing as a `Memory` named tuple (see above). Line with
      memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
    """

    sequential: List[MemoryState]
    cumulative: List[MemoryState]
    current: List[MemoryState]
    total: Memory
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# A full trace is simply the ordered list of per-event memory snapshots.
MemoryTrace = List[UsedMemoryState]
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
    """
    measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and
    at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package
    `memory_profiler`:
    https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239

    Args:
        - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
          the peak memory

        - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage

        - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage

    Returns:

        - `max_memory`: (`int`) consumed memory peak in Bytes (the string "N/A" when psutil is unavailable)
    """

    def get_cpu_memory(process_id: int) -> int:
        """
        measures current cpu memory usage of a given `process_id`

        Args:
            - `process_id`: (`int`) process_id for which to measure memory

        Returns

            - `memory`: (`int`) consumed memory in Bytes
        """
        process = psutil.Process(process_id)
        try:
            # Old psutil versions expose `get_memory_info` instead of `memory_info`.
            meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
            memory = getattr(process, meminfo_attr)()[0]
        except psutil.AccessDenied:
            raise ValueError("Error with Psutil.")
        return memory

    if not is_psutil_available():
        logger.warning(
            "Psutil not installed, we won't log CPU memory usage. "
            "Install Psutil (pip install psutil) to use CPU memory tracing."
        )
        max_memory = "N/A"
    else:

        class MemoryMeasureProcess(Process):
            """
            `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
            memory usage of a process
            """

            def __init__(self, process_id: int, child_connection: Connection, interval: float):
                super().__init__()
                self.process_id = process_id
                self.interval = interval
                self.connection = child_connection
                self.num_measurements = 1
                self.mem_usage = get_cpu_memory(self.process_id)

            def run(self):
                # Handshake: tell the parent we are ready before sampling.
                self.connection.send(0)
                stop = False
                while True:
                    # Track the running maximum of the parent's RSS.
                    self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
                    self.num_measurements += 1

                    if stop:
                        break

                    # `poll` doubles as the sampling sleep: it returns True once
                    # the parent signals completion, triggering one last sample.
                    stop = self.connection.poll(self.interval)

                # send results to parent pipe
                self.connection.send(self.mem_usage)
                self.connection.send(self.num_measurements)

        while True:
            # create child, parent connection
            child_connection, parent_connection = Pipe()

            # instantiate process
            mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
            mem_process.start()

            # wait until we get memory
            parent_connection.recv()

            try:
                # execute function
                function()

                # start parent connection
                parent_connection.send(0)

                # receive memory and num measurements
                max_memory = parent_connection.recv()
                num_measurements = parent_connection.recv()
            except Exception:
                # kill process in a clean way
                parent = psutil.Process(os.getpid())
                for child in parent.children(recursive=True):
                    os.kill(child.pid, SIGKILL)
                mem_process.join(0)
                raise RuntimeError("Process killed. Error in Process")

            # run process at least 20 * interval or until it finishes
            mem_process.join(20 * interval)

            if (num_measurements > 4) or (interval < 1e-6):
                break

            # reduce interval
            # Too few samples means `function` finished faster than the sampling
            # rate; retry the whole measurement with a 10x finer interval.
            interval /= 10

    return max_memory
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def start_memory_tracing(
    modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
    modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
    events_to_trace: str = "line",
    gpus_to_trace: Optional[List[int]] = None,
) -> MemoryTrace:
    """
    Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py`
    for usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory
    "Resident Set Size" (the non-swapped physical memory the process is using). See
    https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info

    Args:
        - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list
          of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
          'transformers.models.gpt2.modeling_gpt2')
        - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list
          of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
        - `events_to_trace`: string or list of string of events to be recorded (see official python doc for
          `sys.settrace` for the list of events) default to line
        - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs

    Return:

        - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script),
          where `UsedMemoryState` holds the current `Frame` plus the CPU RSS and GPU memory measured *before*
          executing the traced line. The list is returned immediately and filled in-place as tracing proceeds.
    """
    # CPU side: keep a handle on the current process for RSS readings.
    if is_psutil_available():
        process = psutil.Process(os.getpid())
    else:
        logger.warning(
            "Psutil not installed, we won't log CPU memory usage. "
            "Install psutil (pip install psutil) to use CPU memory tracing."
        )
        process = None

    # GPU side: resolve the device list once up front; the NVML session is
    # re-opened per traced line inside `traceit` below.
    if is_py3nvml_available():
        try:
            nvml.nvmlInit()
            devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
            nvml.nvmlShutdown()
        except (OSError, nvml.NVMLError):
            logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.")
            log_gpu = False
        else:
            log_gpu = is_torch_available() or is_tf_available()
    else:
        logger.warning(
            "py3nvml not installed, we won't log GPU memory usage. "
            "Install py3nvml (pip install py3nvml) to use GPU memory tracing."
        )
        log_gpu = False

    memory_trace = []

    def traceit(frame, event, args):
        """
        Tracing method executed before running each line in a module or sub-module Record memory allocated in a list
        with debugging information
        """
        global _is_memory_tracing_enabled

        if not _is_memory_tracing_enabled:
            return traceit

        # Filter events
        if events_to_trace is not None:
            if isinstance(events_to_trace, str) and event != events_to_trace:
                return traceit
            elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
                return traceit

        if "__name__" not in frame.f_globals:
            return traceit

        # Filter modules
        name = frame.f_globals["__name__"]
        if not isinstance(name, str):
            return traceit
        else:
            # Filter whitelist of modules to trace
            if modules_to_trace is not None:
                if isinstance(modules_to_trace, str) and modules_to_trace not in name:
                    return traceit
                elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
                    return traceit

            # Filter blacklist of modules not to trace
            if modules_not_to_trace is not None:
                if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
                    return traceit
                elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
                    return traceit

        # Record current tracing state (file, location in file...)
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            # Point at the .py source so linecache can fetch the line text.
            filename = filename[:-1]
        line = linecache.getline(filename, lineno).rstrip()
        traced_state = Frame(filename, name, lineno, event, line)

        # Record current memory state (rss memory) and compute difference with previous memory state
        cpu_mem = 0
        if process is not None:
            mem = process.memory_info()
            cpu_mem = mem.rss

        gpu_mem = 0
        if log_gpu:
            # Clear GPU caches
            if is_torch_available():
                torch_empty_cache()
            if is_tf_available():
                tf_context.context()._clear_caches()  # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802

            # Sum used memory for all GPUs
            nvml.nvmlInit()

            for i in devices:
                handle = nvml.nvmlDeviceGetHandleByIndex(i)
                meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_mem += meminfo.used

            nvml.nvmlShutdown()

        mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
        memory_trace.append(mem_state)

        return traceit

    sys.settrace(traceit)

    global _is_memory_tracing_enabled
    _is_memory_tracing_enabled = True

    return memory_trace
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def stop_memory_tracing(
    memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
) -> Optional[MemorySummary]:
    """
    Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.

    Args:
        `memory_trace` (optional output of start_memory_tracing, default: None):
            memory trace to convert in summary
        `ignore_released_memory` (boolean, default: True):
            if True we only sum memory increase to compute total memory

    Return:

        - None if `memory_trace` is None or holds fewer than two snapshots
        - `MemorySummary` namedtuple otherwise with the fields:

            - `sequential`: a list of `MemoryState` namedtuple computed from the provided `memory_trace` by
              subtracting the memory after executing each line from the memory before executing said line.
            - `cumulative`: a list of `MemoryState` namedtuple with cumulative increase in memory for each line
              obtained by summing repeated memory increase for a line if it's executed several times. The list is
              sorted from the frame with the largest memory consumption to the frame with the smallest (can be
              negative if memory is released)
            - `current`: a list of `MemoryState` namedtuple with the absolute memory at each step, sorted by
              decreasing CPU+GPU footprint.
            - `total`: total memory increase during the full tracing as a `Memory` named tuple. Line with memory
              release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).

        See the `Memory`, `Frame` and `MemoryState` NamedTuples above for the shape of the nested values.
    """
    global _is_memory_tracing_enabled
    _is_memory_tracing_enabled = False

    if memory_trace is not None and len(memory_trace) > 1:
        memory_diff_trace = []
        memory_curr_trace = []

        # Per-frame accumulator: [cpu_increase, gpu_increase, cpu_gpu_increase]
        cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])

        # Walk consecutive snapshot pairs; the delta between snapshot i and i+1
        # is attributed to the line recorded in snapshot i.
        for (
            (frame, cpu_mem, gpu_mem),
            (next_frame, next_cpu_mem, next_gpu_mem),
        ) in zip(memory_trace[:-1], memory_trace[1:]):
            cpu_mem_inc = next_cpu_mem - cpu_mem
            gpu_mem_inc = next_gpu_mem - gpu_mem
            cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
            memory_diff_trace.append(
                MemoryState(
                    frame=frame,
                    cpu=Memory(cpu_mem_inc),
                    gpu=Memory(gpu_mem_inc),
                    cpu_gpu=Memory(cpu_gpu_mem_inc),
                )
            )

            memory_curr_trace.append(
                MemoryState(
                    frame=frame,
                    cpu=Memory(next_cpu_mem),
                    gpu=Memory(next_gpu_mem),
                    cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
                )
            )

            cumulative_memory_dict[frame][0] += cpu_mem_inc
            cumulative_memory_dict[frame][1] += gpu_mem_inc
            cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc

        cumulative_memory = sorted(
            cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True
        )  # order by the total CPU + GPU memory increase
        cumulative_memory = [
            MemoryState(
                frame=frame,
                cpu=Memory(cpu_mem_inc),
                gpu=Memory(gpu_mem_inc),
                cpu_gpu=Memory(cpu_gpu_mem_inc),
            )
            for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
        ]

        memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)

        if ignore_released_memory:
            total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
        else:
            total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)

        total_memory = Memory(total_memory)

        return MemorySummary(
            sequential=memory_diff_trace,
            cumulative=cumulative_memory,
            current=memory_curr_trace,
            total=total_memory,
        )

    return None
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def bytes_to_mega_bytes(memory_amount: int) -> int:
    """Utility to convert a number of bytes (int) into a number of mega bytes (int)"""
    # Floor division by 2**20 — identical to the right-shift `memory_amount >> 20`.
    return memory_amount // (1 << 20)
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class Benchmark(ABC):
|
| 597 |
+
"""
|
| 598 |
+
Benchmarks is a simple but feature-complete benchmarking script to compare memory and time performance of models in
|
| 599 |
+
Transformers.
|
| 600 |
+
"""
|
| 601 |
+
|
| 602 |
+
args: BenchmarkArguments
|
| 603 |
+
configs: PretrainedConfig
|
| 604 |
+
framework: str
|
| 605 |
+
|
| 606 |
+
def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
|
| 607 |
+
self.args = args
|
| 608 |
+
if configs is None:
|
| 609 |
+
self.config_dict = {
|
| 610 |
+
model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
|
| 611 |
+
}
|
| 612 |
+
else:
|
| 613 |
+
self.config_dict = dict(zip(self.args.model_names, configs))
|
| 614 |
+
|
| 615 |
+
warnings.warn(
|
| 616 |
+
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
|
| 617 |
+
" are deprecated in general and it is advised to use external Benchmarking libraries "
|
| 618 |
+
" to benchmark Transformer models.",
|
| 619 |
+
FutureWarning,
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == 0:
|
| 623 |
+
logger.warning(
|
| 624 |
+
"Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The"
|
| 625 |
+
" flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
|
| 626 |
+
)
|
| 627 |
+
|
| 628 |
+
self._print_fn = None
|
| 629 |
+
self._framework_version = None
|
| 630 |
+
self._environment_info = None
|
| 631 |
+
|
| 632 |
+
    @property
    def print_fn(self):
        """
        Callable used for all benchmark output. Defaults to the builtin `print`;
        when `args.log_print` is set, returns a wrapper that additionally appends
        each message to `args.log_filename`. The chosen callable is cached on
        first access.
        """
        if self._print_fn is None:
            if self.args.log_print:

                def print_and_log(*args):
                    # Append to the log file, then echo to stdout.
                    with open(self.args.log_filename, "a") as log_file:
                        log_file.write("".join(args) + "\n")
                    print(*args)

                self._print_fn = print_and_log
            else:
                self._print_fn = print
        return self._print_fn
|
| 646 |
+
|
| 647 |
+
    @property
    @abstractmethod
    def framework_version(self):
        """Version string of the underlying framework (implemented by subclasses)."""
        pass
|
| 651 |
+
|
| 652 |
+
    @abstractmethod
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure a single inference timing; returns time in seconds (implemented by subclasses)."""
        pass
|
| 655 |
+
|
| 656 |
+
    @abstractmethod
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Measure a single training-step timing; returns time in seconds (implemented by subclasses)."""
        pass
|
| 659 |
+
|
| 660 |
+
    @abstractmethod
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure inference memory; returns (peak `Memory`, optional line-by-line `MemorySummary`)."""
        pass
|
| 665 |
+
|
| 666 |
+
    @abstractmethod
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure training memory; returns (peak `Memory`, optional line-by-line `MemorySummary`)."""
        pass
|
| 671 |
+
|
| 672 |
+
    def inference_speed(self, *args, **kwargs) -> float:
        """Public wrapper for `_inference_speed`, optionally run in its own process for accurate measurement."""
        return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)
|
| 674 |
+
|
| 675 |
+
    def train_speed(self, *args, **kwargs) -> float:
        """Public wrapper for `_train_speed`, optionally run in its own process for accurate measurement."""
        return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)
|
| 677 |
+
|
| 678 |
+
    def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        """Public wrapper for `_inference_memory`, optionally run in its own process for accurate measurement."""
        return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)
|
| 680 |
+
|
| 681 |
+
    def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
        """Public wrapper for `_train_memory`, optionally run in its own process for accurate measurement."""
        return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)
|
| 683 |
+
|
| 684 |
+
def run(self):
|
| 685 |
+
result_dict = {model_name: {} for model_name in self.args.model_names}
|
| 686 |
+
inference_result_time = copy.deepcopy(result_dict)
|
| 687 |
+
inference_result_memory = copy.deepcopy(result_dict)
|
| 688 |
+
train_result_time = copy.deepcopy(result_dict)
|
| 689 |
+
train_result_memory = copy.deepcopy(result_dict)
|
| 690 |
+
|
| 691 |
+
for c, model_name in enumerate(self.args.model_names):
|
| 692 |
+
self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
|
| 693 |
+
|
| 694 |
+
model_dict = {
|
| 695 |
+
"bs": self.args.batch_sizes,
|
| 696 |
+
"ss": self.args.sequence_lengths,
|
| 697 |
+
"result": {i: {} for i in self.args.batch_sizes},
|
| 698 |
+
}
|
| 699 |
+
inference_result_time[model_name] = copy.deepcopy(model_dict)
|
| 700 |
+
inference_result_memory[model_name] = copy.deepcopy(model_dict)
|
| 701 |
+
train_result_time[model_name] = copy.deepcopy(model_dict)
|
| 702 |
+
train_result_memory[model_name] = copy.deepcopy(model_dict)
|
| 703 |
+
|
| 704 |
+
inference_summary = train_summary = None
|
| 705 |
+
|
| 706 |
+
for batch_size in self.args.batch_sizes:
|
| 707 |
+
for sequence_length in self.args.sequence_lengths:
|
| 708 |
+
if self.args.inference:
|
| 709 |
+
if self.args.memory:
|
| 710 |
+
memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
|
| 711 |
+
inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
|
| 712 |
+
if self.args.speed:
|
| 713 |
+
time = self.inference_speed(model_name, batch_size, sequence_length)
|
| 714 |
+
inference_result_time[model_name]["result"][batch_size][sequence_length] = time
|
| 715 |
+
|
| 716 |
+
if self.args.training:
|
| 717 |
+
if self.args.memory:
|
| 718 |
+
memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
|
| 719 |
+
train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
|
| 720 |
+
if self.args.speed:
|
| 721 |
+
time = self.train_speed(model_name, batch_size, sequence_length)
|
| 722 |
+
train_result_time[model_name]["result"][batch_size][sequence_length] = time
|
| 723 |
+
|
| 724 |
+
if self.args.inference:
|
| 725 |
+
if self.args.speed:
|
| 726 |
+
self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
|
| 727 |
+
self.print_results(inference_result_time, type_label="Time in s")
|
| 728 |
+
self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
|
| 729 |
+
if self.args.is_tpu:
|
| 730 |
+
self.print_fn(
|
| 731 |
+
"TPU was used for inference. Note that the time after compilation stabilized (after ~10"
|
| 732 |
+
" inferences model.forward(..) calls) was measured."
|
| 733 |
+
)
|
| 734 |
+
|
| 735 |
+
if self.args.memory:
|
| 736 |
+
self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
|
| 737 |
+
self.print_results(inference_result_memory, type_label="Memory in MB")
|
| 738 |
+
self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
|
| 739 |
+
|
| 740 |
+
if self.args.trace_memory_line_by_line:
|
| 741 |
+
self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
|
| 742 |
+
self.print_memory_trace_statistics(inference_summary)
|
| 743 |
+
|
| 744 |
+
if self.args.training:
|
| 745 |
+
if self.args.speed:
|
| 746 |
+
self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
|
| 747 |
+
self.print_results(train_result_time, "Time in s")
|
| 748 |
+
self.save_to_csv(train_result_time, self.args.train_time_csv_file)
|
| 749 |
+
if self.args.is_tpu:
|
| 750 |
+
self.print_fn(
|
| 751 |
+
"TPU was used for training. Note that the time after compilation stabilized (after ~10 train"
|
| 752 |
+
" loss=model.forward(...) + loss.backward() calls) was measured."
|
| 753 |
+
)
|
| 754 |
+
|
| 755 |
+
if self.args.memory:
|
| 756 |
+
self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
|
| 757 |
+
self.print_results(train_result_memory, type_label="Memory in MB")
|
| 758 |
+
self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
|
| 759 |
+
|
| 760 |
+
if self.args.trace_memory_line_by_line:
|
| 761 |
+
self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
|
| 762 |
+
self.print_memory_trace_statistics(train_summary)
|
| 763 |
+
|
| 764 |
+
if self.args.env_print:
|
| 765 |
+
self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
|
| 766 |
+
self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n")
|
| 767 |
+
|
| 768 |
+
if self.args.save_to_csv:
|
| 769 |
+
with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
|
| 770 |
+
writer = csv.writer(csv_file)
|
| 771 |
+
for key, value in self.environment_info.items():
|
| 772 |
+
writer.writerow([key, value])
|
| 773 |
+
|
| 774 |
+
return BenchmarkOutput(
|
| 775 |
+
inference_result_time,
|
| 776 |
+
inference_result_memory,
|
| 777 |
+
train_result_time,
|
| 778 |
+
train_result_memory,
|
| 779 |
+
inference_summary,
|
| 780 |
+
train_summary,
|
| 781 |
+
)
|
| 782 |
+
|
| 783 |
+
    @property
    def environment_info(self):
        """
        Dict describing the benchmark environment (library/framework versions,
        platform, CPU RAM, GPU properties, TPU usage). Built once on first
        access and cached in `self._environment_info`.
        """
        if self._environment_info is None:
            info = {}
            info["transformers_version"] = version
            info["framework"] = self.framework
            if self.framework == "PyTorch":
                info["use_torchscript"] = self.args.torchscript
            if self.framework == "TensorFlow":
                info["eager_mode"] = self.args.eager_mode
                info["use_xla"] = self.args.use_xla
            info["framework_version"] = self.framework_version
            info["python_version"] = platform.python_version()
            info["system"] = platform.system()
            info["cpu"] = platform.processor()
            info["architecture"] = platform.architecture()[0]
            info["date"] = datetime.date(datetime.now())
            info["time"] = datetime.time(datetime.now())
            info["fp16"] = self.args.fp16
            info["use_multiprocessing"] = self.args.do_multi_processing
            info["only_pretrain_model"] = self.args.only_pretrain_model

            if is_psutil_available():
                info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
            else:
                logger.warning(
                    "Psutil not installed, we won't log available CPU memory. "
                    "Install psutil (pip install psutil) to log available CPU memory."
                )
                info["cpu_ram_mb"] = "N/A"

            info["use_gpu"] = self.args.is_gpu
            if self.args.is_gpu:
                info["num_gpus"] = 1  # TODO(PVP) Currently only single GPU is supported
                if is_py3nvml_available():
                    # Query properties of the single benchmarked device via NVML.
                    nvml.nvmlInit()
                    handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                    info["gpu"] = nvml.nvmlDeviceGetName(handle)
                    info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
                    info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
                    info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
                    nvml.nvmlShutdown()
                else:
                    logger.warning(
                        "py3nvml not installed, we won't log GPU memory usage. "
                        "Install py3nvml (pip install py3nvml) to log information about GPU."
                    )
                    info["gpu"] = "N/A"
                    info["gpu_ram_mb"] = "N/A"
                    info["gpu_power_watts"] = "N/A"
                    info["gpu_performance_state"] = "N/A"

            info["use_tpu"] = self.args.is_tpu
            # TODO(PVP): See if we can add more information about TPU
            # see: https://github.com/pytorch/xla/issues/2180

            self._environment_info = info
        return self._environment_info
|
| 841 |
+
|
| 842 |
+
def print_results(self, result_dict, type_label):
|
| 843 |
+
self.print_fn(80 * "-")
|
| 844 |
+
self.print_fn(
|
| 845 |
+
"Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
|
| 846 |
+
)
|
| 847 |
+
self.print_fn(80 * "-")
|
| 848 |
+
for model_name in self.args.model_names:
|
| 849 |
+
for batch_size in result_dict[model_name]["bs"]:
|
| 850 |
+
for sequence_length in result_dict[model_name]["ss"]:
|
| 851 |
+
result = result_dict[model_name]["result"][batch_size][sequence_length]
|
| 852 |
+
if isinstance(result, float):
|
| 853 |
+
result = round(1000 * result) / 1000
|
| 854 |
+
result = "< 0.001" if result == 0.0 else str(result)
|
| 855 |
+
else:
|
| 856 |
+
result = str(result)
|
| 857 |
+
self.print_fn(
|
| 858 |
+
model_name[:30].center(30) + str(batch_size).center(15),
|
| 859 |
+
str(sequence_length).center(15),
|
| 860 |
+
result.center(15),
|
| 861 |
+
)
|
| 862 |
+
self.print_fn(80 * "-")
|
| 863 |
+
|
| 864 |
+
def print_memory_trace_statistics(self, summary: MemorySummary):
|
| 865 |
+
self.print_fn(
|
| 866 |
+
"\nLine by line memory consumption:\n"
|
| 867 |
+
+ "\n".join(
|
| 868 |
+
f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
|
| 869 |
+
for state in summary.sequential
|
| 870 |
+
)
|
| 871 |
+
)
|
| 872 |
+
self.print_fn(
|
| 873 |
+
"\nLines with top memory consumption:\n"
|
| 874 |
+
+ "\n".join(
|
| 875 |
+
f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
|
| 876 |
+
for state in summary.cumulative[:6]
|
| 877 |
+
)
|
| 878 |
+
)
|
| 879 |
+
self.print_fn(
|
| 880 |
+
"\nLines with lowest memory consumption:\n"
|
| 881 |
+
+ "\n".join(
|
| 882 |
+
f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
|
| 883 |
+
for state in summary.cumulative[-6:]
|
| 884 |
+
)
|
| 885 |
+
)
|
| 886 |
+
self.print_fn(f"\nTotal memory increase: {summary.total}")
|
| 887 |
+
|
| 888 |
+
def save_to_csv(self, result_dict, filename):
|
| 889 |
+
if not self.args.save_to_csv:
|
| 890 |
+
return
|
| 891 |
+
self.print_fn("Saving results to csv.")
|
| 892 |
+
with open(filename, mode="w") as csv_file:
|
| 893 |
+
assert len(self.args.model_names) > 0, f"At least 1 model should be defined, but got {self.model_names}"
|
| 894 |
+
|
| 895 |
+
fieldnames = ["model", "batch_size", "sequence_length"]
|
| 896 |
+
writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
|
| 897 |
+
writer.writeheader()
|
| 898 |
+
|
| 899 |
+
for model_name in self.args.model_names:
|
| 900 |
+
result_dict_model = result_dict[model_name]["result"]
|
| 901 |
+
for bs in result_dict_model:
|
| 902 |
+
for ss in result_dict_model[bs]:
|
| 903 |
+
result_model = result_dict_model[bs][ss]
|
| 904 |
+
writer.writerow(
|
| 905 |
+
{
|
| 906 |
+
"model": model_name,
|
| 907 |
+
"batch_size": bs,
|
| 908 |
+
"sequence_length": ss,
|
| 909 |
+
"result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
|
| 910 |
+
result_model
|
| 911 |
+
),
|
| 912 |
+
}
|
| 913 |
+
)
|
valley/lib/python3.10/site-packages/transformers/generation/flax_utils.py
ADDED
|
@@ -0,0 +1,1004 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import copy
|
| 19 |
+
import inspect
|
| 20 |
+
import warnings
|
| 21 |
+
from functools import partial
|
| 22 |
+
from typing import Any, Dict, Optional, Union
|
| 23 |
+
|
| 24 |
+
import flax
|
| 25 |
+
import jax
|
| 26 |
+
import jax.numpy as jnp
|
| 27 |
+
import numpy as np
|
| 28 |
+
from jax import lax
|
| 29 |
+
|
| 30 |
+
from ..models.auto import (
|
| 31 |
+
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
|
| 32 |
+
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 33 |
+
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
|
| 34 |
+
)
|
| 35 |
+
from ..utils import ModelOutput, logging
|
| 36 |
+
from .configuration_utils import GenerationConfig
|
| 37 |
+
from .flax_logits_process import (
|
| 38 |
+
FlaxForcedBOSTokenLogitsProcessor,
|
| 39 |
+
FlaxForcedEOSTokenLogitsProcessor,
|
| 40 |
+
FlaxForceTokensLogitsProcessor,
|
| 41 |
+
FlaxLogitsProcessorList,
|
| 42 |
+
FlaxMinLengthLogitsProcessor,
|
| 43 |
+
FlaxSuppressTokensAtBeginLogitsProcessor,
|
| 44 |
+
FlaxSuppressTokensLogitsProcessor,
|
| 45 |
+
FlaxTemperatureLogitsWarper,
|
| 46 |
+
FlaxTopKLogitsWarper,
|
| 47 |
+
FlaxTopPLogitsWarper,
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
logger = logging.get_logger(__name__)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@flax.struct.dataclass
class FlaxGreedySearchOutput(ModelOutput):
    """
    Flax Base class for outputs of decoder-only generation models using greedy search.


    Args:
        sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
            The generated sequences.
    """

    # Generated token ids, shape (batch_size, max_length).
    sequences: jnp.ndarray = None
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@flax.struct.dataclass
class FlaxSampleOutput(ModelOutput):
    """
    Flax Base class for outputs of decoder-only generation models using sampling.


    Args:
        sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
            The generated sequences.
    """

    # Generated token ids, shape (batch_size, max_length).
    sequences: jnp.ndarray = None
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@flax.struct.dataclass
class FlaxBeamSearchOutput(ModelOutput):
    """
    Flax Base class for outputs of decoder-only generation models using beam search.


    Args:
        sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
            The generated sequences.
        scores (`jnp.ndarray` of shape `(batch_size,)`):
            The scores (log probabilities) of the generated sequences.
    """

    # Generated token ids, shape (batch_size, max_length).
    sequences: jnp.ndarray = None
    # Log-probability score of each returned sequence, shape (batch_size,).
    scores: jnp.ndarray = None
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@flax.struct.dataclass
class GreedyState:
    """Loop-carried state for the greedy-search decoding loop (see `_greedy_search`)."""

    cur_len: jnp.ndarray  # current generated length
    sequences: jnp.ndarray  # output buffer of token ids
    running_token: jnp.ndarray  # token(s) fed to the model at the current step
    is_sent_finished: jnp.ndarray  # presumably per-sequence completion flags — TODO confirm in loop body
    model_kwargs: Dict[str, jnp.ndarray]  # model inputs carried between steps (e.g. cache)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@flax.struct.dataclass
class SampleState:
    """Loop-carried state for the sampling decoding loop (see `_sample`)."""

    cur_len: jnp.ndarray  # current generated length
    sequences: jnp.ndarray  # output buffer of token ids
    running_token: jnp.ndarray  # token(s) fed to the model at the current step
    is_sent_finished: jnp.ndarray  # presumably per-sequence completion flags — TODO confirm in loop body
    prng_key: jnp.ndarray  # PRNG key threaded through the stochastic sampling steps
    model_kwargs: Dict[str, jnp.ndarray]  # model inputs carried between steps (e.g. cache)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@flax.struct.dataclass
class BeamSearchState:
    """Loop-carried state for the beam-search decoding loop (see `_beam_search`)."""

    cur_len: jnp.ndarray  # current generated length
    running_sequences: jnp.ndarray  # in-progress beam hypotheses
    running_scores: jnp.ndarray  # scores of the in-progress hypotheses
    sequences: jnp.ndarray  # finalized hypotheses
    scores: jnp.ndarray  # scores of the finalized hypotheses
    is_sent_finished: jnp.ndarray  # presumably per-beam completion flags — TODO confirm in loop body
    model_kwargs: Dict[str, jnp.ndarray]  # model inputs carried between steps (e.g. cache)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class FlaxGenerationMixin:
|
| 130 |
+
"""
|
| 131 |
+
A class containing all functions for auto-regressive text generation, to be used as a mixin in
|
| 132 |
+
[`FlaxPreTrainedModel`].
|
| 133 |
+
|
| 134 |
+
The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
|
| 135 |
+
- *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
|
| 136 |
+
`do_sample=False`
|
| 137 |
+
- *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
|
| 138 |
+
`do_sample=True`
|
| 139 |
+
- *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
|
| 140 |
+
`do_sample=False`
|
| 141 |
+
|
| 142 |
+
You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To
|
| 143 |
+
learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
|
| 144 |
+
"""
|
| 145 |
+
|
| 146 |
+
def prepare_inputs_for_generation(self, *args, **kwargs):
|
| 147 |
+
raise NotImplementedError(
|
| 148 |
+
"A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
@staticmethod
|
| 152 |
+
def _run_loop_in_debug(cond_fn, body_fn, init_state):
|
| 153 |
+
"""
|
| 154 |
+
Run generation in untraced mode. This should only be used for debugging purposes.
|
| 155 |
+
"""
|
| 156 |
+
state = init_state
|
| 157 |
+
while cond_fn(state):
|
| 158 |
+
state = body_fn(state)
|
| 159 |
+
return state
|
| 160 |
+
|
| 161 |
+
def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
|
| 162 |
+
encoder_kwargs = {
|
| 163 |
+
argument: value
|
| 164 |
+
for argument, value in model_kwargs.items()
|
| 165 |
+
if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
|
| 166 |
+
}
|
| 167 |
+
model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
|
| 168 |
+
return model_kwargs
|
| 169 |
+
|
| 170 |
+
def _prepare_decoder_input_ids_for_generation(
|
| 171 |
+
self,
|
| 172 |
+
batch_size: int,
|
| 173 |
+
decoder_start_token_id: int = None,
|
| 174 |
+
bos_token_id: int = None,
|
| 175 |
+
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
|
| 176 |
+
) -> jnp.ndarray:
|
| 177 |
+
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
|
| 178 |
+
# Only use this arg if not None, otherwise just remove from model_kwargs
|
| 179 |
+
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
|
| 180 |
+
if decoder_input_ids is not None:
|
| 181 |
+
return decoder_input_ids
|
| 182 |
+
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
|
| 183 |
+
return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
|
| 184 |
+
|
| 185 |
+
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
|
| 186 |
+
# retrieve decoder_start_token_id for encoder-decoder models
|
| 187 |
+
# fall back to bos_token_id if necessary
|
| 188 |
+
decoder_start_token_id = (
|
| 189 |
+
decoder_start_token_id
|
| 190 |
+
if decoder_start_token_id is not None
|
| 191 |
+
else self.generation_config.decoder_start_token_id
|
| 192 |
+
)
|
| 193 |
+
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
|
| 194 |
+
if decoder_start_token_id is not None:
|
| 195 |
+
return decoder_start_token_id
|
| 196 |
+
elif (
|
| 197 |
+
hasattr(self.config, "decoder")
|
| 198 |
+
and hasattr(self.config.decoder, "decoder_start_token_id")
|
| 199 |
+
and self.config.decoder.decoder_start_token_id is not None
|
| 200 |
+
):
|
| 201 |
+
return self.config.decoder.decoder_start_token_id
|
| 202 |
+
elif bos_token_id is not None:
|
| 203 |
+
return bos_token_id
|
| 204 |
+
elif (
|
| 205 |
+
hasattr(self.config, "decoder")
|
| 206 |
+
and hasattr(self.config.decoder, "bos_token_id")
|
| 207 |
+
and self.config.decoder.bos_token_id is not None
|
| 208 |
+
):
|
| 209 |
+
return self.config.decoder.bos_token_id
|
| 210 |
+
raise ValueError(
|
| 211 |
+
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
@staticmethod
|
| 215 |
+
def _expand_to_num_beams(tensor, num_beams):
|
| 216 |
+
return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
|
| 217 |
+
|
| 218 |
+
    def _adapt_logits_for_beam_search(self, logits):
        """
        This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
        search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
        """
        # Default implementation: identity (no adaptation of the logits).
        return logits
|
| 224 |
+
|
| 225 |
+
def _validate_model_class(self):
|
| 226 |
+
"""
|
| 227 |
+
Confirms that the model class is compatible with generation. If not, raises an exception that points to the
|
| 228 |
+
right class to use.
|
| 229 |
+
"""
|
| 230 |
+
if not self.can_generate():
|
| 231 |
+
generate_compatible_mappings = [
|
| 232 |
+
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
|
| 233 |
+
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
|
| 234 |
+
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 235 |
+
]
|
| 236 |
+
generate_compatible_classes = set()
|
| 237 |
+
for model_mapping in generate_compatible_mappings:
|
| 238 |
+
supported_models = model_mapping.get(type(self.config), default=None)
|
| 239 |
+
if supported_models is not None:
|
| 240 |
+
generate_compatible_classes.add(supported_models.__name__)
|
| 241 |
+
exception_message = (
|
| 242 |
+
f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
|
| 243 |
+
"it doesn't have a language model head."
|
| 244 |
+
)
|
| 245 |
+
if generate_compatible_classes:
|
| 246 |
+
exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
|
| 247 |
+
raise TypeError(exception_message)
|
| 248 |
+
|
| 249 |
+
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
|
| 250 |
+
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
|
| 251 |
+
unused_model_args = []
|
| 252 |
+
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
|
| 253 |
+
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
|
| 254 |
+
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
|
| 255 |
+
if "kwargs" in model_args or "model_kwargs" in model_args:
|
| 256 |
+
model_args |= set(inspect.signature(self.__call__).parameters)
|
| 257 |
+
for key, value in model_kwargs.items():
|
| 258 |
+
if value is not None and key not in model_args:
|
| 259 |
+
unused_model_args.append(key)
|
| 260 |
+
|
| 261 |
+
if unused_model_args:
|
| 262 |
+
raise ValueError(
|
| 263 |
+
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
|
| 264 |
+
" generate arguments will also show up in this list)"
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
    def generate(
        self,
        input_ids: jnp.ndarray,
        generation_config: Optional[GenerationConfig] = None,
        prng_key: Optional[jnp.ndarray] = None,
        trace: bool = True,
        params: Optional[Dict[str, jnp.ndarray]] = None,
        logits_processor: Optional[FlaxLogitsProcessorList] = None,
        **kwargs,
    ):
        r"""
        Generates sequences of token ids for models with a language modeling head.

        Parameters:
            input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which had the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            trace (`bool`, *optional*, defaults to `True`):
                Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
                considerably slower runtime.
            params (`Dict[str, jnp.ndarray]`, *optional*):
                Optionally the model parameters can be passed. Can be useful for parallelized generation.
            logits_processor (`FlaxLogitsProcessorList `, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            kwargs:
                Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

        Return:
            [`~utils.ModelOutput`].

        """
        # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
        self._validate_model_class()

        # priority: `generation_config` argument > `model.generation_config` (the default generation config)
        if generation_config is None:
            # legacy: users may modify the model configuration to control generation -- update the generation config
            # model attribute accordingly, if it was created from the model config
            if self.generation_config._from_model_config:
                new_generation_config = GenerationConfig.from_model_config(self.config)
                if new_generation_config != self.generation_config:
                    warnings.warn(
                        "You have modified the pretrained model configuration to control generation. This is a"
                        " deprecated strategy to control generation and will be removed soon, in a future version."
                        " Please use a generation configuration file (see"
                        " https://huggingface.co/docs/transformers/main_classes/text_generation)"
                    )
                    self.generation_config = new_generation_config
            generation_config = self.generation_config

        # Deep-copy so kwarg overrides below never mutate the model's stored config.
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()

        # set init values
        prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)

        # If no pad token is configured, fall back to (the first) EOS token as pad.
        if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
            if model_kwargs.get("attention_mask") is None:
                logger.warning(
                    "The attention mask and the pad token id were not set. As a consequence, you may observe "
                    "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
                )
            eos_token_id = generation_config.eos_token_id
            if isinstance(eos_token_id, list):
                eos_token_id = eos_token_id[0]
            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
            generation_config.pad_token_id = eos_token_id

        if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
            raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")

        # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
        if not self.config.is_encoder_decoder and not trace:
            if (
                generation_config.pad_token_id is not None
                and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
            ):
                logger.warning(
                    "A decoder-only architecture is being used, but right-padding was detected! For correct "
                    "generation results, please set `padding_side='left'` when initializing the tokenizer."
                )

        batch_size = input_ids.shape[0]

        # For encoder-decoder models: run the encoder once, then switch `input_ids`
        # over to the decoder side for the generation loop.
        if self.config.is_encoder_decoder:
            # add encoder_outputs to model_kwargs
            if model_kwargs.get("encoder_outputs") is None:
                model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
            # prepare decoder_input_ids for generation
            input_ids = self._prepare_decoder_input_ids_for_generation(
                batch_size,
                decoder_start_token_id=generation_config.decoder_start_token_id,
                bos_token_id=generation_config.bos_token_id,
                model_kwargs=model_kwargs,
            )

        # Prepare `max_length` depending on other stopping criteria.
        input_ids_seq_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        if has_default_max_length and generation_config.max_new_tokens is None:
            warnings.warn(
                f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
                "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
                " recommend using `max_new_tokens` to control the maximum length of the generation.",
                UserWarning,
            )
        elif generation_config.max_new_tokens is not None:
            # `max_new_tokens` takes precedence: convert it into an absolute `max_length`.
            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
            if not has_default_max_length:
                # NOTE(review): `logger.warn` is a deprecated alias of `logger.warning`.
                logger.warn(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                    UserWarning,
                )

        if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
            raise ValueError(
                f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than"
                f" the maximum length ({generation_config.max_length})"
            )
        if input_ids_seq_length >= generation_config.max_length:
            input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
            logger.warning(
                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing`max_new_tokens`."
            )

        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            logits_processor=logits_processor,
        )

        # Dispatch to the decoding strategy selected by (do_sample, num_beams).
        if not generation_config.do_sample and generation_config.num_beams == 1:
            return self._greedy_search(
                input_ids,
                generation_config.max_length,
                generation_config.pad_token_id,
                generation_config.eos_token_id,
                logits_processor=logits_processor,
                trace=trace,
                params=params,
                model_kwargs=model_kwargs,
            )
        elif generation_config.do_sample and generation_config.num_beams == 1:
            logits_warper = self._get_logits_warper(generation_config=generation_config)
            return self._sample(
                input_ids,
                generation_config.max_length,
                generation_config.pad_token_id,
                generation_config.eos_token_id,
                prng_key,
                logits_warper=logits_warper,
                logits_processor=logits_processor,
                trace=trace,
                params=params,
                model_kwargs=model_kwargs,
            )
        elif not generation_config.do_sample and generation_config.num_beams > 1:
            # broadcast input_ids & encoder_outputs
            input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)

            if "encoder_outputs" in model_kwargs:
                model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
                    model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
                )

            for kwarg in ["attention_mask", "decoder_attention_mask"]:
                if kwarg in model_kwargs:
                    model_kwargs[kwarg] = self._expand_to_num_beams(
                        model_kwargs[kwarg], num_beams=generation_config.num_beams
                    )

            return self._beam_search(
                input_ids,
                generation_config.max_length,
                generation_config.pad_token_id,
                generation_config.eos_token_id,
                length_penalty=generation_config.length_penalty,
                early_stopping=generation_config.early_stopping,
                logits_processor=logits_processor,
                trace=trace,
                params=params,
                model_kwargs=model_kwargs,
            )
        else:
            raise NotImplementedError("`Beam sampling is currently not implemented.")
|
| 471 |
+
|
| 472 |
+
def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
|
| 473 |
+
"""
|
| 474 |
+
This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`]
|
| 475 |
+
instances used for multinomial sampling.
|
| 476 |
+
"""
|
| 477 |
+
warpers = FlaxLogitsProcessorList()
|
| 478 |
+
|
| 479 |
+
if generation_config.temperature is not None and generation_config.temperature != 1.0:
|
| 480 |
+
warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
|
| 481 |
+
if generation_config.top_k is not None and generation_config.top_k != 0:
|
| 482 |
+
warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
|
| 483 |
+
if generation_config.top_p is not None and generation_config.top_p < 1.0:
|
| 484 |
+
warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
|
| 485 |
+
|
| 486 |
+
return warpers
|
| 487 |
+
|
| 488 |
+
def _get_logits_processor(
|
| 489 |
+
self,
|
| 490 |
+
generation_config: GenerationConfig,
|
| 491 |
+
input_ids_seq_length: int,
|
| 492 |
+
logits_processor: Optional[FlaxLogitsProcessorList],
|
| 493 |
+
) -> FlaxLogitsProcessorList:
|
| 494 |
+
"""
|
| 495 |
+
This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`]
|
| 496 |
+
instances used to modify the scores of the language model head.
|
| 497 |
+
"""
|
| 498 |
+
processors = FlaxLogitsProcessorList()
|
| 499 |
+
|
| 500 |
+
if (
|
| 501 |
+
generation_config.min_length is not None
|
| 502 |
+
and generation_config.eos_token_id is not None
|
| 503 |
+
and generation_config.min_length > -1
|
| 504 |
+
):
|
| 505 |
+
processors.append(
|
| 506 |
+
FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
|
| 507 |
+
)
|
| 508 |
+
if generation_config.forced_bos_token_id is not None:
|
| 509 |
+
processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
|
| 510 |
+
if generation_config.forced_eos_token_id is not None:
|
| 511 |
+
processors.append(
|
| 512 |
+
FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
|
| 513 |
+
)
|
| 514 |
+
if generation_config.suppress_tokens is not None:
|
| 515 |
+
processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
|
| 516 |
+
if generation_config.begin_suppress_tokens is not None:
|
| 517 |
+
begin_index = input_ids_seq_length
|
| 518 |
+
begin_index = (
|
| 519 |
+
begin_index
|
| 520 |
+
if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
|
| 521 |
+
else begin_index + 1
|
| 522 |
+
)
|
| 523 |
+
if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
|
| 524 |
+
# generation starts after the last token that is forced
|
| 525 |
+
begin_index += generation_config.forced_decoder_ids[-1][0]
|
| 526 |
+
processors.append(
|
| 527 |
+
FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
|
| 528 |
+
)
|
| 529 |
+
if generation_config.forced_decoder_ids is not None:
|
| 530 |
+
forced_decoder_ids = [
|
| 531 |
+
[input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
|
| 532 |
+
]
|
| 533 |
+
processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
|
| 534 |
+
processors = self._merge_criteria_processor_list(processors, logits_processor)
|
| 535 |
+
|
| 536 |
+
return processors
|
| 537 |
+
|
| 538 |
+
def _merge_criteria_processor_list(
|
| 539 |
+
self,
|
| 540 |
+
default_list: FlaxLogitsProcessorList,
|
| 541 |
+
custom_list: FlaxLogitsProcessorList,
|
| 542 |
+
) -> FlaxLogitsProcessorList:
|
| 543 |
+
if len(custom_list) == 0:
|
| 544 |
+
return default_list
|
| 545 |
+
for default in default_list:
|
| 546 |
+
for custom in custom_list:
|
| 547 |
+
if type(custom) is type(default):
|
| 548 |
+
object_type = "logits processor"
|
| 549 |
+
raise ValueError(
|
| 550 |
+
f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
|
| 551 |
+
f" `generate`, but it has already been created with the values {default}. {default} has been"
|
| 552 |
+
" created by passing the corresponding arguments to generate or by the model's config default"
|
| 553 |
+
f" values. If you just want to change the default values of {object_type} consider passing"
|
| 554 |
+
f" them as arguments to `generate` instead of using a custom {object_type}."
|
| 555 |
+
)
|
| 556 |
+
default_list.extend(custom_list)
|
| 557 |
+
return default_list
|
| 558 |
+
|
| 559 |
+
def _greedy_search(
|
| 560 |
+
self,
|
| 561 |
+
input_ids: None,
|
| 562 |
+
max_length: Optional[int] = None,
|
| 563 |
+
pad_token_id: Optional[int] = None,
|
| 564 |
+
eos_token_id: Optional[int] = None,
|
| 565 |
+
logits_processor: Optional[FlaxLogitsProcessorList] = None,
|
| 566 |
+
trace: bool = True,
|
| 567 |
+
params: Optional[Dict[str, jnp.ndarray]] = None,
|
| 568 |
+
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
|
| 569 |
+
):
|
| 570 |
+
# init values
|
| 571 |
+
max_length = max_length if max_length is not None else self.generation_config.max_length
|
| 572 |
+
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
|
| 573 |
+
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
|
| 574 |
+
|
| 575 |
+
batch_size, cur_len = input_ids.shape
|
| 576 |
+
|
| 577 |
+
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
|
| 578 |
+
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
|
| 579 |
+
cur_len = jnp.array(cur_len)
|
| 580 |
+
|
| 581 |
+
# per batch-item holding current token in loop.
|
| 582 |
+
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
|
| 583 |
+
sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
|
| 584 |
+
|
| 585 |
+
# per batch-item state bit indicating if sentence has finished.
|
| 586 |
+
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
|
| 587 |
+
|
| 588 |
+
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
|
| 589 |
+
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
|
| 590 |
+
model = self.decode if self.config.is_encoder_decoder else self
|
| 591 |
+
# initialize model specific kwargs
|
| 592 |
+
model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
|
| 593 |
+
|
| 594 |
+
# initialize state
|
| 595 |
+
state = GreedyState(
|
| 596 |
+
cur_len=cur_len,
|
| 597 |
+
sequences=sequences,
|
| 598 |
+
running_token=input_ids,
|
| 599 |
+
is_sent_finished=is_sent_finished,
|
| 600 |
+
model_kwargs=model_kwargs,
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
def greedy_search_cond_fn(state):
|
| 604 |
+
"""state termination condition fn."""
|
| 605 |
+
has_reached_max_length = state.cur_len == max_length
|
| 606 |
+
all_sequence_finished = jnp.all(state.is_sent_finished)
|
| 607 |
+
finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
|
| 608 |
+
return ~finish_generation
|
| 609 |
+
|
| 610 |
+
def greedy_search_body_fn(state):
|
| 611 |
+
"""state update fn."""
|
| 612 |
+
model_outputs = model(state.running_token, params=params, **state.model_kwargs)
|
| 613 |
+
logits = model_outputs.logits[:, -1]
|
| 614 |
+
|
| 615 |
+
# apply min_length, ...
|
| 616 |
+
logits = logits_processor(state.sequences, logits, state.cur_len)
|
| 617 |
+
|
| 618 |
+
next_token = jnp.argmax(logits, axis=-1)
|
| 619 |
+
|
| 620 |
+
next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
|
| 621 |
+
next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
|
| 622 |
+
next_token = next_token[:, None]
|
| 623 |
+
|
| 624 |
+
next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
|
| 625 |
+
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
|
| 626 |
+
return GreedyState(
|
| 627 |
+
cur_len=state.cur_len + 1,
|
| 628 |
+
sequences=next_sequences,
|
| 629 |
+
running_token=next_token,
|
| 630 |
+
is_sent_finished=next_is_sent_finished,
|
| 631 |
+
model_kwargs=next_model_kwargs,
|
| 632 |
+
)
|
| 633 |
+
|
| 634 |
+
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
|
| 635 |
+
if input_ids.shape[1] > 1:
|
| 636 |
+
state = greedy_search_body_fn(state)
|
| 637 |
+
|
| 638 |
+
if not trace:
|
| 639 |
+
state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
|
| 640 |
+
else:
|
| 641 |
+
state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
|
| 642 |
+
|
| 643 |
+
return FlaxGreedySearchOutput(sequences=state.sequences)
|
| 644 |
+
|
| 645 |
+
def _sample(
|
| 646 |
+
self,
|
| 647 |
+
input_ids: None,
|
| 648 |
+
max_length: Optional[int] = None,
|
| 649 |
+
pad_token_id: Optional[int] = None,
|
| 650 |
+
eos_token_id: Optional[int] = None,
|
| 651 |
+
prng_key: Optional[jnp.ndarray] = None,
|
| 652 |
+
logits_processor: Optional[FlaxLogitsProcessorList] = None,
|
| 653 |
+
logits_warper: Optional[FlaxLogitsProcessorList] = None,
|
| 654 |
+
trace: bool = True,
|
| 655 |
+
params: Optional[Dict[str, jnp.ndarray]] = None,
|
| 656 |
+
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
|
| 657 |
+
):
|
| 658 |
+
# init values
|
| 659 |
+
max_length = max_length if max_length is not None else self.generation_config.max_length
|
| 660 |
+
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
|
| 661 |
+
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
|
| 662 |
+
prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
|
| 663 |
+
|
| 664 |
+
batch_size, cur_len = input_ids.shape
|
| 665 |
+
|
| 666 |
+
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
|
| 667 |
+
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
|
| 668 |
+
cur_len = jnp.array(cur_len)
|
| 669 |
+
|
| 670 |
+
# per batch-item holding current token in loop.
|
| 671 |
+
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
|
| 672 |
+
sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
|
| 673 |
+
|
| 674 |
+
# per batch-item state bit indicating if sentence has finished.
|
| 675 |
+
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
|
| 676 |
+
|
| 677 |
+
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
|
| 678 |
+
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
|
| 679 |
+
model = self.decode if self.config.is_encoder_decoder else self
|
| 680 |
+
|
| 681 |
+
# initialize model specific kwargs
|
| 682 |
+
model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
|
| 683 |
+
|
| 684 |
+
# initialize state
|
| 685 |
+
state = SampleState(
|
| 686 |
+
cur_len=cur_len,
|
| 687 |
+
sequences=sequences,
|
| 688 |
+
running_token=input_ids,
|
| 689 |
+
is_sent_finished=is_sent_finished,
|
| 690 |
+
prng_key=prng_key,
|
| 691 |
+
model_kwargs=model_kwargs,
|
| 692 |
+
)
|
| 693 |
+
|
| 694 |
+
def sample_search_cond_fn(state):
|
| 695 |
+
"""state termination condition fn."""
|
| 696 |
+
has_reached_max_length = state.cur_len == max_length
|
| 697 |
+
all_sequence_finished = jnp.all(state.is_sent_finished)
|
| 698 |
+
finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
|
| 699 |
+
return ~finish_generation
|
| 700 |
+
|
| 701 |
+
def sample_search_body_fn(state):
|
| 702 |
+
"""state update fn."""
|
| 703 |
+
prng_key, prng_key_next = jax.random.split(state.prng_key)
|
| 704 |
+
model_outputs = model(state.running_token, params=params, **state.model_kwargs)
|
| 705 |
+
|
| 706 |
+
logits = model_outputs.logits[:, -1]
|
| 707 |
+
|
| 708 |
+
# apply min_length, ...
|
| 709 |
+
logits = logits_processor(state.sequences, logits, state.cur_len)
|
| 710 |
+
# apply top_p, top_k, temperature
|
| 711 |
+
logits = logits_warper(logits, logits, state.cur_len)
|
| 712 |
+
|
| 713 |
+
next_token = jax.random.categorical(prng_key, logits, axis=-1)
|
| 714 |
+
|
| 715 |
+
next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
|
| 716 |
+
next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished
|
| 717 |
+
next_token = next_token[:, None]
|
| 718 |
+
|
| 719 |
+
next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
|
| 720 |
+
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
|
| 721 |
+
|
| 722 |
+
return SampleState(
|
| 723 |
+
cur_len=state.cur_len + 1,
|
| 724 |
+
sequences=next_sequences,
|
| 725 |
+
running_token=next_token,
|
| 726 |
+
is_sent_finished=next_is_sent_finished,
|
| 727 |
+
model_kwargs=next_model_kwargs,
|
| 728 |
+
prng_key=prng_key_next,
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
|
| 732 |
+
if input_ids.shape[1] > 1:
|
| 733 |
+
state = sample_search_body_fn(state)
|
| 734 |
+
|
| 735 |
+
if not trace:
|
| 736 |
+
state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
|
| 737 |
+
else:
|
| 738 |
+
state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
|
| 739 |
+
|
| 740 |
+
return FlaxSampleOutput(sequences=state.sequences)
|
| 741 |
+
|
| 742 |
+
def _beam_search(
|
| 743 |
+
self,
|
| 744 |
+
input_ids: None,
|
| 745 |
+
max_length: Optional[int] = None,
|
| 746 |
+
pad_token_id: Optional[int] = None,
|
| 747 |
+
eos_token_id: Optional[int] = None,
|
| 748 |
+
length_penalty: Optional[float] = None,
|
| 749 |
+
early_stopping: Optional[Union[bool, str]] = None,
|
| 750 |
+
logits_processor: Optional[FlaxLogitsProcessorList] = None,
|
| 751 |
+
trace: bool = True,
|
| 752 |
+
params: Optional[Dict[str, jnp.ndarray]] = None,
|
| 753 |
+
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
|
| 754 |
+
):
|
| 755 |
+
"""
|
| 756 |
+
This beam search function is heavily inspired by Flax's official example:
|
| 757 |
+
https://github.com/google/flax/blob/main/examples/wmt/decode.py
|
| 758 |
+
"""
|
| 759 |
+
|
| 760 |
+
def flatten_beam_dim(tensor):
|
| 761 |
+
"""Flattens the first two dimensions of a non-scalar array."""
|
| 762 |
+
# ignore scalars (e.g. cache index)
|
| 763 |
+
if tensor.ndim == 0:
|
| 764 |
+
return tensor
|
| 765 |
+
return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
|
| 766 |
+
|
| 767 |
+
def unflatten_beam_dim(tensor, batch_size, num_beams):
|
| 768 |
+
"""Unflattens the first, flat batch*beam dimension of a non-scalar array."""
|
| 769 |
+
# ignore scalars (e.g. cache index)
|
| 770 |
+
if tensor.ndim == 0:
|
| 771 |
+
return tensor
|
| 772 |
+
return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
|
| 773 |
+
|
| 774 |
+
def gather_beams(nested, beam_indices, batch_size, new_num_beams):
|
| 775 |
+
"""
|
| 776 |
+
Gathers the beam slices indexed by beam_indices into new beam array.
|
| 777 |
+
"""
|
| 778 |
+
batch_indices = jnp.reshape(
|
| 779 |
+
jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
def gather_fn(tensor):
|
| 783 |
+
# ignore scalars (e.g. cache index)
|
| 784 |
+
if tensor.ndim == 0:
|
| 785 |
+
return tensor
|
| 786 |
+
else:
|
| 787 |
+
return tensor[batch_indices, beam_indices]
|
| 788 |
+
|
| 789 |
+
return jax.tree_util.tree_map(gather_fn, nested)
|
| 790 |
+
|
| 791 |
+
# init values
|
| 792 |
+
max_length = max_length if max_length is not None else self.generation_config.max_length
|
| 793 |
+
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
|
| 794 |
+
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
|
| 795 |
+
length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
|
| 796 |
+
early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
|
| 797 |
+
|
| 798 |
+
batch_size, num_beams, cur_len = input_ids.shape
|
| 799 |
+
|
| 800 |
+
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
|
| 801 |
+
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
|
| 802 |
+
cur_len = jnp.array(cur_len)
|
| 803 |
+
|
| 804 |
+
# per batch,beam-item holding current token in loop.
|
| 805 |
+
sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
|
| 806 |
+
running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
|
| 807 |
+
running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
|
| 808 |
+
|
| 809 |
+
# per batch,beam-item state bit indicating if sentence has finished.
|
| 810 |
+
is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
|
| 811 |
+
|
| 812 |
+
# per batch,beam-item score, logprobs
|
| 813 |
+
running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
|
| 814 |
+
scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
|
| 815 |
+
|
| 816 |
+
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
|
| 817 |
+
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
|
| 818 |
+
model = self.decode if self.config.is_encoder_decoder else self
|
| 819 |
+
|
| 820 |
+
# flatten beam dim
|
| 821 |
+
if "encoder_outputs" in model_kwargs:
|
| 822 |
+
model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
|
| 823 |
+
model_kwargs["encoder_outputs"]["last_hidden_state"]
|
| 824 |
+
)
|
| 825 |
+
for kwarg in ["attention_mask", "decoder_attention_mask"]:
|
| 826 |
+
if kwarg in model_kwargs:
|
| 827 |
+
model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
|
| 828 |
+
|
| 829 |
+
# initialize model specific kwargs
|
| 830 |
+
model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
|
| 831 |
+
|
| 832 |
+
# initialize state
|
| 833 |
+
state = BeamSearchState(
|
| 834 |
+
cur_len=cur_len,
|
| 835 |
+
running_sequences=running_sequences,
|
| 836 |
+
running_scores=running_scores,
|
| 837 |
+
sequences=sequences,
|
| 838 |
+
scores=scores,
|
| 839 |
+
is_sent_finished=is_sent_finished,
|
| 840 |
+
model_kwargs=model_kwargs,
|
| 841 |
+
)
|
| 842 |
+
|
| 843 |
+
def beam_search_cond_fn(state):
|
| 844 |
+
"""beam search state termination condition fn."""
|
| 845 |
+
|
| 846 |
+
# 1. is less than max length?
|
| 847 |
+
not_max_length_yet = state.cur_len < max_length
|
| 848 |
+
|
| 849 |
+
# 2. can the new beams still improve?
|
| 850 |
+
# early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
|
| 851 |
+
# below for more details.
|
| 852 |
+
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
|
| 853 |
+
# early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
|
| 854 |
+
# length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
|
| 855 |
+
if early_stopping == "never" and length_penalty > 0.0:
|
| 856 |
+
best_running_score = state.running_scores[:, :1] / (max_length**length_penalty)
|
| 857 |
+
else:
|
| 858 |
+
best_running_score = state.running_scores[:, :1] / (state.cur_len**length_penalty)
|
| 859 |
+
worst_finished_score = jnp.where(
|
| 860 |
+
state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
|
| 861 |
+
)
|
| 862 |
+
improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
|
| 863 |
+
|
| 864 |
+
# 3. is there still a beam that has not finished?
|
| 865 |
+
still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
|
| 866 |
+
|
| 867 |
+
return not_max_length_yet & still_open_beam & improvement_still_possible
|
| 868 |
+
|
| 869 |
+
def beam_search_body_fn(state, input_ids_length=1):
|
| 870 |
+
"""beam search state update fn."""
|
| 871 |
+
# 1. Forward current tokens
|
| 872 |
+
# Collect the current position slice along length to feed the fast
|
| 873 |
+
# autoregressive decoder model. Flatten the beam dimension into batch
|
| 874 |
+
# dimension for feeding into the model.
|
| 875 |
+
# unflatten beam dimension
|
| 876 |
+
# Unflatten beam dimension in attention cache arrays
|
| 877 |
+
input_token = flatten_beam_dim(
|
| 878 |
+
lax.dynamic_slice(
|
| 879 |
+
state.running_sequences,
|
| 880 |
+
(0, 0, state.cur_len - input_ids_length),
|
| 881 |
+
(batch_size, num_beams, input_ids_length),
|
| 882 |
+
)
|
| 883 |
+
)
|
| 884 |
+
model_outputs = model(input_token, params=params, **state.model_kwargs)
|
| 885 |
+
|
| 886 |
+
logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
|
| 887 |
+
cache = jax.tree_util.tree_map(
|
| 888 |
+
lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
|
| 889 |
+
)
|
| 890 |
+
|
| 891 |
+
# adapt logits for FlaxMarianMTModel
|
| 892 |
+
logits = self._adapt_logits_for_beam_search(logits)
|
| 893 |
+
|
| 894 |
+
# 2. Compute log probs
|
| 895 |
+
# get log probabilities from logits,
|
| 896 |
+
# process logits with processors (*e.g.* min_length, ...), and
|
| 897 |
+
# add new logprobs to existing running logprobs scores.
|
| 898 |
+
log_probs = jax.nn.log_softmax(logits)
|
| 899 |
+
log_probs = logits_processor(
|
| 900 |
+
flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len
|
| 901 |
+
)
|
| 902 |
+
log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
|
| 903 |
+
log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
|
| 904 |
+
vocab_size = log_probs.shape[2]
|
| 905 |
+
log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
|
| 906 |
+
|
| 907 |
+
# 3. Retrieve top-K
|
| 908 |
+
# Each item in batch has num_beams * vocab_size candidate sequences.
|
| 909 |
+
# For each item, get the top 2*k candidates with the highest log-
|
| 910 |
+
# probabilities. We gather the top 2*K beams here so that even if the best
|
| 911 |
+
# K sequences reach EOS simultaneously, we have another K sequences
|
| 912 |
+
# remaining to continue the live beam search.
|
| 913 |
+
# Gather the top 2*K scores from _all_ beams.
|
| 914 |
+
# Gather 2*k top beams.
|
| 915 |
+
# Recover the beam index by floor division.
|
| 916 |
+
# Recover token id by modulo division and expand Id array for broadcasting.
|
| 917 |
+
# Update sequences for the 2*K top-k new sequences.
|
| 918 |
+
beams_to_keep = 2 * num_beams
|
| 919 |
+
topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
|
| 920 |
+
topk_beam_indices = topk_indices // vocab_size
|
| 921 |
+
topk_running_sequences = gather_beams(
|
| 922 |
+
state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
|
| 923 |
+
)
|
| 924 |
+
topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
|
| 925 |
+
topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
|
| 926 |
+
|
| 927 |
+
# 4. Check which sequences have ended
|
| 928 |
+
# Update current sequences:
|
| 929 |
+
# Did any of these sequences reach an end marker?
|
| 930 |
+
# To prevent these just finished sequences from being added to the current sequences
|
| 931 |
+
# set of active beam search sequences, set their log probs to a very large
|
| 932 |
+
# negative value.
|
| 933 |
+
did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
|
| 934 |
+
running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
|
| 935 |
+
# 5. Get running sequences scores for next
|
| 936 |
+
# Determine the top k beam indices (from top 2*k beams) from log probs
|
| 937 |
+
# and gather top k beams (from top 2*k beams).
|
| 938 |
+
next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
|
| 939 |
+
next_running_sequences, next_running_scores = gather_beams(
|
| 940 |
+
[topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
|
| 941 |
+
)
|
| 942 |
+
|
| 943 |
+
# 6. Process topk logits
|
| 944 |
+
# Further process log probs:
|
| 945 |
+
# - add length penalty
|
| 946 |
+
# - make sure no scores can be added anymore if beam is full
|
| 947 |
+
# - make sure still running sequences cannot be chosen as finalized beam
|
| 948 |
+
topk_log_probs = topk_log_probs / (state.cur_len**length_penalty)
|
| 949 |
+
beams_in_batch_are_full = jnp.broadcast_to(
|
| 950 |
+
state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
|
| 951 |
+
) & (early_stopping is True)
|
| 952 |
+
add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
|
| 953 |
+
topk_log_probs += add_penalty * np.array(-1.0e7)
|
| 954 |
+
|
| 955 |
+
# 7. Get scores, sequences, is sentence finished for next.
|
| 956 |
+
# Combine sequences, scores, and flags along the beam dimension and compare
|
| 957 |
+
# new finished sequence scores to existing finished scores and select the
|
| 958 |
+
# best from the new set of beams
|
| 959 |
+
merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
|
| 960 |
+
merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
|
| 961 |
+
merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
|
| 962 |
+
topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
|
| 963 |
+
next_sequences, next_scores, next_is_sent_finished = gather_beams(
|
| 964 |
+
[merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
|
| 965 |
+
)
|
| 966 |
+
|
| 967 |
+
# 8. Update model kwargs.
|
| 968 |
+
# Determine the top k beam indices from the original set of all beams.
|
| 969 |
+
# With these, gather the top k beam-associated caches.
|
| 970 |
+
next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
|
| 971 |
+
next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
|
| 972 |
+
model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
|
| 973 |
+
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
|
| 974 |
+
|
| 975 |
+
return BeamSearchState(
|
| 976 |
+
cur_len=state.cur_len + 1,
|
| 977 |
+
running_scores=next_running_scores,
|
| 978 |
+
running_sequences=next_running_sequences,
|
| 979 |
+
scores=next_scores,
|
| 980 |
+
sequences=next_sequences,
|
| 981 |
+
is_sent_finished=next_is_sent_finished,
|
| 982 |
+
model_kwargs=next_model_kwargs,
|
| 983 |
+
)
|
| 984 |
+
|
| 985 |
+
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
|
| 986 |
+
if input_ids.shape[-1] > 1:
|
| 987 |
+
state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
|
| 988 |
+
|
| 989 |
+
if not trace:
|
| 990 |
+
state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
|
| 991 |
+
else:
|
| 992 |
+
state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
|
| 993 |
+
|
| 994 |
+
# Account for the edge-case where there are no finished sequences for a
|
| 995 |
+
# particular batch item. If so, return running sequences for that batch item.
|
| 996 |
+
none_finished = jnp.any(state.is_sent_finished, axis=1)
|
| 997 |
+
sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
|
| 998 |
+
scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
|
| 999 |
+
|
| 1000 |
+
# take best beam for each batch
|
| 1001 |
+
sequences = sequences[:, 0]
|
| 1002 |
+
scores = scores[:, 0]
|
| 1003 |
+
|
| 1004 |
+
return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
|
valley/lib/python3.10/site-packages/transformers/generation/logits_process.py
ADDED
|
@@ -0,0 +1,982 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2020 The HuggingFace Inc. team
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import inspect
|
| 17 |
+
import math
|
| 18 |
+
from typing import Callable, Iterable, List, Optional, Tuple, Union
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
from ..utils import add_start_docstrings
|
| 24 |
+
from ..utils.logging import get_logger
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
logger = get_logger(__name__)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
|
| 31 |
+
Args:
|
| 32 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 33 |
+
Indices of input sequence tokens in the vocabulary.
|
| 34 |
+
|
| 35 |
+
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 36 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 37 |
+
|
| 38 |
+
[What are input IDs?](../glossary#input-ids)
|
| 39 |
+
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
|
| 40 |
+
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
|
| 41 |
+
search or log softmax for each vocabulary token when using beam search
|
| 42 |
+
kwargs:
|
| 43 |
+
Additional logits processor specific kwargs.
|
| 44 |
+
|
| 45 |
+
Return:
|
| 46 |
+
`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
|
| 47 |
+
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class LogitsProcessor:
    """Base class for callables that rewrite next-token logits during generation.

    Concrete subclasses implement `__call__(input_ids, scores)`; calling the base class directly is an error.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        """Torch method for processing logits."""
        # Abstract: every usable processor must override this method.
        message = f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        raise NotImplementedError(message)
| 62 |
+
class LogitsWarper:
    """Base class for callables that reshape the logits distribution for multinomial sampling.

    Concrete subclasses implement `__call__(input_ids, scores)`; calling the base class directly is an error.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        """Torch method for warping logits."""
        # Abstract: every usable warper must override this method.
        message = f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        raise NotImplementedError(message)
| 73 |
+
class LogitsProcessorList(list):
    """
    A plain `list` of [`LogitsProcessor`] / [`LogitsWarper`] instances that is itself callable: invoking the list
    applies every contained processor to `scores` in order, threading the result through each one.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
        for proc in self:
            params = inspect.signature(proc.__call__).parameters
            if len(params) <= 2:
                # Standard processor: takes only (input_ids, scores).
                scores = proc(input_ids, scores)
                continue
            # Processor declares extra parameters beyond (input_ids, scores):
            # all of them must be supplied through **kwargs.
            extra_names = list(params.keys())[2:]
            if not all(name in kwargs for name in extra_names):
                raise ValueError(
                    f"Make sure that all the required parameters: {list(params.keys())} for "
                    f"{proc.__class__} are passed to the logits processor."
                )
            scores = proc(input_ids, scores, **kwargs)
        return scores
| 96 |
+
class MinLengthLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that forbids ending the sequence early by forcing the EOS score to `-inf` until a minimum
    length is reached.

    Args:
        min_length (`int`):
            The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
        eos_token_id (`Union[int, List[int]]`):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
    """

    def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        # Normalize a single id into a one-element list so __call__ can always iterate.
        eos_ids = [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        if not all(isinstance(tok, int) for tok in eos_ids) or any(tok < 0 for tok in eos_ids):
            raise ValueError(f"`eos_token_id` has to be a list of positive integers, but is {eos_ids}")

        self.min_length = min_length
        self.eos_token_id = eos_ids

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # While the sequence is still shorter than the minimum, make EOS unselectable.
        if input_ids.shape[-1] < self.min_length:
            for eos in self.eos_token_id:
                scores[:, eos] = -float("inf")
        return scores
| 127 |
+
class MinNewTokensLengthLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] enforcing a minimum number of *newly generated* tokens (i.e. excluding the prompt) by setting
    the EOS score to `-inf` until that many tokens have been produced.

    Args:
        prompt_length_to_skip (`int`):
            The input tokens length.
        min_new_tokens (`int`):
            The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`.
        eos_token_id (`Union[int, List[int]]`):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
    """

    def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: Union[int, List[int]]):
        # Both length arguments must be non-negative integers.
        int_args = {
            "prompt_length_to_skip": prompt_length_to_skip,
            "min_new_tokens": min_new_tokens,
        }
        for arg_name, arg_value in int_args.items():
            if not isinstance(arg_value, int) or arg_value < 0:
                raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}")

        # Normalize a single id into a one-element list so __call__ can always iterate.
        eos_ids = [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id
        if not all(isinstance(tok, int) for tok in eos_ids) or any(tok < 0 for tok in eos_ids):
            raise ValueError(f"`eos_token_id` has to be a list of positive integers, but is {eos_ids}")

        self.prompt_length_to_skip = prompt_length_to_skip
        self.min_new_tokens = min_new_tokens
        self.eos_token_id = eos_ids

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Count only tokens generated after the prompt.
        generated_so_far = input_ids.shape[-1] - self.prompt_length_to_skip
        if generated_so_far < self.min_new_tokens:
            for eos in self.eos_token_id:
                scores[:, eos] = -float("inf")

        return scores
| 166 |
+
class TemperatureLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that rescales the logits by a temperature (exponential scaling of the output probability
    distribution).

    Args:
        temperature (`float`):
            The value used to module the logits distribution.
    """

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.FloatTensor:
        # Dividing logits by T flattens the softmax distribution for T > 1 and sharpens it for T < 1.
        return scores / self.temperature
| 186 |
+
class RepetitionPenaltyLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] enforcing an exponential penalty on repeated sequences.

    Args:
        repetition_penalty (`float`):
            The parameter for repetition penalty. 1.0 means no penalty. See [this
            paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
    """

    def __init__(self, penalty: float):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")

        self.penalty = penalty

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Pull out the current logit of every token that already occurs in the sequence.
        seen_scores = torch.gather(scores, 1, input_ids)

        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
        seen_scores = torch.where(seen_scores < 0, seen_scores * self.penalty, seen_scores / self.penalty)

        # Write the penalized values back in place (note: mutates `scores`).
        scores.scatter_(1, input_ids, seen_scores)
        return scores
| 212 |
+
class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] enforcing an exponential penalty on tokens that are not in the original input.

    Args:
        hallucination_penalty (`float`):
            The parameter for hallucination penalty. 1.0 means no penalty.
        encoder_input_ids (`torch.LongTensor`):
            The encoder_input_ids that should not be repeated within the decoder ids.
    """

    def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")

        # The reciprocal turns the "penalty" into a *boost* for encoder tokens,
        # which implicitly penalizes everything else.
        self.penalty = 1 / penalty
        self.encoder_input_ids = encoder_input_ids

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Fetch the logits of every token present in the encoder input.
        encoder_scores = torch.gather(scores, 1, self.encoder_input_ids)

        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
        encoder_scores = torch.where(
            encoder_scores < 0, encoder_scores * self.penalty, encoder_scores / self.penalty
        )

        # Write the adjusted values back in place (note: mutates `scores`).
        scores.scatter_(1, self.encoder_input_ids, encoder_scores)
        return scores
| 240 |
+
class TopPLogitsWarper(LogitsWarper):
    """
    [`LogitsWarper`] that performs top-p (nucleus) filtering: only the smallest set of highest-probability tokens
    whose cumulative probability reaches `top_p` is kept; every other token is set to `filter_value`.

    Args:
        top_p (`float`):
            If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
            higher are kept for generation.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        top_p = float(top_p)
        # NOTE(review): the boundary values 0 and 1 pass this check even though the
        # message says "> 0 and < 1" — confirm whether the bounds should be strict.
        if top_p < 0 or top_p > 1.0:
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Sort ascending so the highest-probability tokens sit at the END of each row.
        sorted_logits, sorted_indices = torch.sort(scores, descending=False)
        cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)

        # Remove tokens with cumulative top_p above the threshold (token with 0 are kept)
        # In ascending order, positions whose running mass is still <= (1 - top_p) form
        # exactly the low-probability tail outside the nucleus.
        sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p)
        if self.min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep
            # (the last entries of the ascending sort are the most probable tokens).
            sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0

        # scatter sorted tensors to original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores
| 279 |
+
class TopKLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.

    Args:
        top_k (`int`):
            The number of highest probability vocabulary tokens to keep for top-k-filtering.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        # Never keep fewer than `min_tokens_to_keep` tokens.
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        effective_k = min(self.top_k, scores.size(-1))  # cannot keep more tokens than the vocab holds
        # The k-th best score per row is the cutoff; everything strictly below it is filtered.
        kth_best_score = torch.topk(scores, effective_k)[0][..., -1, None]
        return scores.masked_fill(scores < kth_best_score, self.filter_value)
| 307 |
+
class TypicalLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that performs typical decoding. See [Typical Decoding for Natural Language
    Generation](https://arxiv.org/abs/2202.00666) for more information.

    Args:
        mass (`float`):
            Value of typical_p between 0 and 1 inclusive, defaults to 0.9.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        mass = float(mass)
        if not (mass > 0 and mass < 1):
            raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}")

        self.filter_value = filter_value
        self.mass = mass
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # calculate entropy
        # (nansum guards against -inf logits that produce 0 * -inf = NaN terms)
        normalized = torch.nn.functional.log_softmax(scores, dim=-1)
        p = torch.exp(normalized)
        ent = -(normalized * p).nansum(-1, keepdim=True)

        # shift and sort
        # A token's "typicality" is how close its surprisal (-log p) lies to the
        # distribution's entropy; sorting ascending puts the most typical tokens first.
        shifted_scores = torch.abs((-normalized) - ent)
        sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False)
        sorted_logits = scores.gather(-1, sorted_indices)
        cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)

        # Remove tokens with cumulative mass above the threshold
        # `last_ind` is, per row, the index of the last token inside the typical set.
        last_ind = (cumulative_probs < self.mass).sum(dim=1)
        last_ind[last_ind < 0] = 0
        # Ban every token less typical (larger shifted score) than the cutoff token.
        sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1))
        if self.min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
        # Map the mask from sorted order back to the original vocabulary order.
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)

        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores
| 355 |
+
class EpsilonLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the
    largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model
    Desmoothing](https://arxiv.org/abs/2210.15191) for more information.

    Args:
        epsilon (`float`):
            If set to > 0, only the most tokens with probabilities `epsilon` or higher are kept for generation.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        epsilon = float(epsilon)
        if epsilon <= 0 or epsilon >= 1:
            raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}")

        min_tokens_to_keep = int(min_tokens_to_keep)
        if min_tokens_to_keep < 1:
            raise ValueError(
                f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
            )

        self.epsilon = epsilon
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Tokens whose probability falls below the absolute cutoff are removal candidates.
        probs = scores.softmax(dim=-1)
        below_cutoff = probs < self.epsilon

        # ...but always spare the `min_tokens_to_keep` best-scoring tokens.
        kept = min(self.min_tokens_to_keep, scores.size(-1))  # Safety check
        kth_best_score = torch.topk(scores, kept)[0][..., -1, None]
        removal_mask = below_cutoff & (scores < kth_best_score)

        return scores.masked_fill(removal_mask, self.filter_value)
| 398 |
+
class EtaLogitsWarper(LogitsWarper):
    r"""
    [`LogitsWarper`] that performs eta-sampling, i.e. calculates a dynamic cutoff `eta := min(epsilon, sqrt(epsilon,
    e^-entropy(probabilities)))` and restricts to tokens with `prob >= eta`. Takes the largest min_tokens_to_keep
    tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model
    Desmoothing](https://arxiv.org/abs/2210.15191) for more information.

    Args:
        epsilon (`float`):
            Base cutoff in (0, 1); the dynamic cutoff `eta` can never exceed it.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        epsilon = float(epsilon)
        if epsilon <= 0 or epsilon >= 1:
            raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}")

        min_tokens_to_keep = int(min_tokens_to_keep)
        if min_tokens_to_keep < 1:
            raise ValueError(
                f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
            )

        # Stored as a tensor so torch.min/torch.sqrt can consume it directly.
        self.epsilon = torch.tensor(epsilon)
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Calculate the adaptive cutoff
        probabilities = scores.softmax(dim=-1)
        entropy = torch.distributions.Categorical(logits=scores).entropy()
        # Fix: `self.epsilon` is created on the CPU in __init__; `torch.min` between a CPU
        # scalar and a CUDA `entropy` tensor raises a device-mismatch error, so move the
        # scalar onto the scores' device first.
        epsilon = self.epsilon.to(scores.device)
        eta = torch.min(epsilon, torch.sqrt(epsilon) * torch.exp(-entropy))[..., None]
        indices_to_remove = probabilities < eta

        # Keep the words with the 'min_tokens_to_keep'-highest probabilities
        top_k = min(self.min_tokens_to_keep, scores.size(-1))  # Safety check
        indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None])

        scores = scores.masked_fill(indices_to_remove, self.filter_value)
        return scores
| 439 |
+
def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int):
|
| 440 |
+
generated_ngrams = [{} for _ in range(num_hypos)]
|
| 441 |
+
for idx in range(num_hypos):
|
| 442 |
+
gen_tokens = prev_input_ids[idx].tolist()
|
| 443 |
+
generated_ngram = generated_ngrams[idx]
|
| 444 |
+
for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):
|
| 445 |
+
prev_ngram_tuple = tuple(ngram[:-1])
|
| 446 |
+
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
|
| 447 |
+
return generated_ngrams
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
|
| 451 |
+
# Before decoding the next token, prevent decoding of ngrams that have already appeared
|
| 452 |
+
start_idx = cur_len + 1 - ngram_size
|
| 453 |
+
ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
|
| 454 |
+
return banned_ngrams.get(ngram_idx, [])
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def _calc_banned_ngram_tokens(
    ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int
) -> List[Iterable[int]]:
    """Copied from fairseq for no_repeat_ngram in beam_search"""
    # Too few tokens generated to have completed even one ngram: nothing is banned yet.
    if cur_len + 1 < ngram_size:
        return [[] for _ in range(num_hypos)]

    ngram_maps = _get_ngrams(ngram_size, prev_input_ids, num_hypos)

    return [
        _get_generated_ngrams(ngram_maps[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len)
        for hypo_idx in range(num_hypos)
    ]
| 474 |
+
class NoRepeatNGramLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that enforces no repetition of n-grams. See
    [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).

    Args:
        ngram_size (`int`):
            All ngrams of size `ngram_size` can only occur once.
    """

    def __init__(self, ngram_size: int):
        if not isinstance(ngram_size, int) or ngram_size <= 0:
            raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
        self.ngram_size = ngram_size

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        batch_size = scores.shape[0]
        seq_len = input_ids.shape[-1]
        banned_per_hypo = _calc_banned_ngram_tokens(self.ngram_size, input_ids, batch_size, seq_len)

        # Mask out every token that would complete an already-generated ngram.
        for hypo_idx, banned in enumerate(banned_per_hypo):
            scores[hypo_idx, banned] = -float("inf")

        return scores
| 500 |
+
class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that enforces no repetition of encoder input ids n-grams for the decoder ids. See
    [ParlAI](https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350).

    Args:
        encoder_ngram_size (`int`):
            All ngrams of size `ngram_size` can only occur within the encoder input ids.
        encoder_input_ids (`int`):
            The encoder_input_ids that should not be repeated within the decoder ids.
    """

    def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):
        if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
            raise ValueError(
                f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}"
            )
        self.ngram_size = encoder_ngram_size
        # Promote a single sequence to a batch of one.
        if len(encoder_input_ids.shape) == 1:
            encoder_input_ids = encoder_input_ids.unsqueeze(0)
        self.batch_size = encoder_input_ids.shape[0]
        # Precompute the encoder-side ngram tables once; reused at every decoding step.
        self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # B x num_beams
        num_hypos = scores.shape[0]
        beams_per_batch = num_hypos // self.batch_size
        seq_len = input_ids.shape[-1]

        for hypo_idx in range(num_hypos):
            # All beams of one batch element share that element's encoder ngram table.
            ngram_table = self.generated_ngrams[hypo_idx // beams_per_batch]
            banned = _get_generated_ngrams(ngram_table, input_ids[hypo_idx], self.ngram_size, seq_len)
            scores[hypo_idx, banned] = -float("inf")

        return scores
| 541 |
+
class NoBadWordsLogitsProcessor(LogitsProcessor):
|
| 542 |
+
"""
|
| 543 |
+
[`LogitsProcessor`] that enforces that specified sequences will never be sampled.
|
| 544 |
+
|
| 545 |
+
Args:
|
| 546 |
+
bad_words_ids (`List[List[int]]`):
|
| 547 |
+
List of list of token ids that are not allowed to be generated. In order to get the token ids of the words
|
| 548 |
+
that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True,
|
| 549 |
+
add_special_tokens=False).input_ids`.
|
| 550 |
+
eos_token_id (`Union[int, List[int]]`):
|
| 551 |
+
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
|
| 552 |
+
"""
|
| 553 |
+
|
| 554 |
+
def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):
|
| 555 |
+
if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
|
| 556 |
+
raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
|
| 557 |
+
if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
|
| 558 |
+
raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
|
| 559 |
+
if any(
|
| 560 |
+
any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
|
| 561 |
+
for bad_word_ids in bad_words_ids
|
| 562 |
+
):
|
| 563 |
+
raise ValueError(
|
| 564 |
+
f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
|
| 565 |
+
)
|
| 566 |
+
|
| 567 |
+
if eos_token_id is None:
|
| 568 |
+
eos_token_id = []
|
| 569 |
+
if isinstance(eos_token_id, int):
|
| 570 |
+
eos_token_id = [eos_token_id]
|
| 571 |
+
|
| 572 |
+
bad_words_ids = list(
|
| 573 |
+
filter(lambda bad_token_seq: all([bad_token_seq != [i] for i in eos_token_id]), bad_words_ids)
|
| 574 |
+
)
|
| 575 |
+
self.bad_words_id_length_1 = []
|
| 576 |
+
self.bad_words_id_length_greater_than_1 = []
|
| 577 |
+
for word in bad_words_ids:
|
| 578 |
+
if len(word) == 1:
|
| 579 |
+
self.bad_words_id_length_1.append(word[0])
|
| 580 |
+
else:
|
| 581 |
+
self.bad_words_id_length_greater_than_1.append(word)
|
| 582 |
+
|
| 583 |
+
self.static_bad_words_mask: Optional[torch.LongTensor] = None
|
| 584 |
+
|
| 585 |
+
for banned_token_seq in self.bad_words_id_length_greater_than_1:
|
| 586 |
+
if len(banned_token_seq) == 0:
|
| 587 |
+
raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
|
| 588 |
+
|
| 589 |
+
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
    """Set the logits of every currently-banned token to `-inf` and return the scores."""
    # The static single-token mask is built on first use, since it needs the
    # vocabulary size taken from `scores`.
    if self.static_bad_words_mask is None and len(self.bad_words_id_length_1) > 0:
        self.static_bad_words_mask = self._calc_static_bad_word_mask(scores)

    # Multi-token bans depend on what has been generated so far.
    banned_now = self._calc_banned_bad_words_ids(input_ids.tolist())
    return self._set_scores_to_inf_for_banned_tokens(scores, banned_now)
|
| 597 |
+
|
| 598 |
+
def _calc_static_bad_word_mask(self, scores: torch.FloatTensor) -> torch.BoolTensor:
    """Return a `(1, vocab_size)` boolean mask that is True at every single-token ban."""
    mask = torch.zeros(scores.shape[1])
    mask[self.bad_words_id_length_1] = 1
    # Unsqueeze so the mask broadcasts over the batch dimension of `scores`.
    return mask.unsqueeze(0).to(scores.device).bool()
|
| 602 |
+
|
| 603 |
+
def _tokens_match(self, prev_tokens: List[int], tokens: List[int]) -> bool:
    """Return True when `tokens` is a suffix of `prev_tokens` (empty always matches)."""
    if not tokens:
        # A one-token banned word has an empty prefix, which matches anything.
        return True
    if len(tokens) > len(prev_tokens):
        # The banned prefix is longer than everything generated so far.
        return False
    return prev_tokens[-len(tokens):] == tokens
|
| 612 |
+
|
| 613 |
+
def _calc_banned_bad_words_ids(self, prev_input_ids: List[List[int]]) -> Iterable[int]:
    """For each batch row, list the tokens that would complete a banned multi-token sequence."""
    return [
        [
            banned_seq[-1]
            for banned_seq in self.bad_words_id_length_greater_than_1
            # A sequence is "armed" when all of it except the last token has
            # just been generated.
            if self._tokens_match(row, banned_seq[:-1])
        ]
        for row in prev_input_ids
    ]
|
| 624 |
+
|
| 625 |
+
def _set_scores_to_inf_for_banned_tokens(
    self, scores: torch.Tensor, banned_tokens: List[List[int]]
) -> torch.Tensor:
    """
    Modifies the scores in place by setting the banned token positions to `-inf`. Banned token is expected to be a
    list of list of banned tokens to ban in the format [[batch index, vocabulary position],...

    Args:
        scores: logits distribution of shape (batch size, vocabulary size)
        banned_tokens: list of list of tokens to ban of length (batch_size)
    """
    banned_mask_list = []
    for idx, batch_banned_tokens in enumerate(banned_tokens):
        for token in batch_banned_tokens:
            # Eliminates invalid bad word IDs that are over the vocabulary size.
            # Valid column indices are 0 .. vocab_size - 1, so the comparison must
            # be strict: the previous `<=` let `token == scores.shape[1]` through,
            # which is out of range and breaks the sparse-tensor construction below.
            if token < scores.shape[1]:
                banned_mask_list.append([idx, token])
            else:
                logger.error(
                    f"An invalid bad word ID is defined: {token}. This ID is not contained in the "
                    "vocabulary, and is therefore ignored."
                )
    if not banned_mask_list and self.static_bad_words_mask is None:
        # Nothing to ban this step.
        return scores

    else:
        if banned_mask_list:
            banned_mask = torch.LongTensor(banned_mask_list)
            indices = torch.ones(len(banned_mask))
            # A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
            # [ 0 1 1 ]
            # [ 0 0 0 ]
            # [ 1 0 0 ]

            banned_mask = (
                torch.sparse.LongTensor(banned_mask.t(), indices, scores.size())
                .to(scores.device)
                .to_dense()
                .bool()
            )

            # Merge the dynamic (multi-token) bans with the static one-token bans.
            if self.static_bad_words_mask is not None:
                banned_mask = torch.bitwise_or(banned_mask, self.static_bad_words_mask)
        else:
            banned_mask = self.static_bad_words_mask

        scores = scores.masked_fill(banned_mask, -float("inf"))
        return scores
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
class PrefixConstrainedLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] for prefix-conditioned constrained generation: at every step only a caller-supplied set of
    tokens may be sampled. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more
    information.

    Args:
        prefix_allowed_tokens_fn: (`Callable[[int, torch.Tensor], List[int]]`):
            Given the batch ID `batch_id` and the tokens generated so far `inputs_ids`, returns the list of token
            ids allowed at the next generation step for that beam.
        num_beams (`int`):
            Number of beams per batch item, used to map flattened rows of `input_ids` back to their batch.
    """

    def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int):
        self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
        self._num_beams = num_beams

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Start from "everything forbidden" and open up only the allowed columns.
        mask = torch.full_like(scores, -math.inf)
        beams_per_batch = input_ids.view(-1, self._num_beams, input_ids.shape[-1])
        for batch_id, beam_sent in enumerate(beams_per_batch):
            for beam_id, sent in enumerate(beam_sent):
                row = batch_id * self._num_beams + beam_id
                mask[row, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0
        return scores + mask
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
class HammingDiversityLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] implementing the Hamming-diversity term of diverse beam search. Only effective with
    [`PreTrainedModel.group_beam_search`]. See [Diverse Beam Search: Decoding Diverse Solutions from Neural
    Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) for details.

    Args:
        diversity_penalty (`float`):
            Amount subtracted from a beam's score for each token that a beam in an earlier group already emitted
            at the same time step. Only effective when group beam search is enabled.
        num_beams (`int`):
            Total number of beams used for group beam search.
        num_beam_groups (`int`):
            Number of groups `num_beams` is divided into to ensure diversity among groups.
    """

    def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
        # Validation order (and messages) is observable behavior; keep it.
        if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
            raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
        self._diversity_penalty = diversity_penalty
        if not isinstance(num_beams, int) or num_beams < 2:
            raise ValueError("`num_beams` should be an integer strictly larger than 1.")
        self._num_beams = num_beams
        if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
            raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
        if num_beam_groups > num_beams:
            raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
        self._num_sub_beams = num_beams // num_beam_groups

    def __call__(
        self,
        input_ids: torch.LongTensor,
        scores: torch.FloatTensor,
        current_tokens: torch.LongTensor,
        beam_group_idx: int,
    ) -> torch.FloatTensor:
        # Penalise tokens in the current group that beams from earlier groups
        # already emitted at this same time step.
        batch_size = current_tokens.shape[0] // self._num_beams
        group_start_idx = beam_group_idx * self._num_sub_beams
        group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
        group_size = group_end_idx - group_start_idx
        vocab_size = scores.shape[-1]

        # The first group has no predecessors to diversify against.
        if group_start_idx == 0:
            return scores

        for batch_idx in range(batch_size):
            beams_start = batch_idx * self._num_beams
            # Tokens produced at this step by all earlier groups of this batch item.
            previous_group_tokens = current_tokens[beams_start : beams_start + group_start_idx]
            token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
            scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= (
                self._diversity_penalty * token_frequency
            )

        return scores
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
class ForcedBOSTokenLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that forces `bos_token_id` to be the first generated token.

    Args:
        bos_token_id (`int`):
            The id of the token to force as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Only the very first generation step (sequence length 1) is constrained.
        if input_ids.shape[-1] == 1:
            vocab_size = scores.shape[1]
            forbidden = [tok for tok in range(vocab_size) if tok != self.bos_token_id]
            scores[:, forbidden] = -float("inf")
            scores[:, self.bos_token_id] = 0
        return scores
|
| 780 |
+
|
| 781 |
+
|
| 782 |
+
class ForcedEOSTokenLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that forces one of `eos_token_id` to be the last generated token when `max_length` is
    reached.

    Args:
        max_length (`int`):
            The maximum length of the sequence to be generated.
        eos_token_id (`Union[int, List[int]]`):
            The id of the token to force as the last generated token when `max_length` is reached. Optionally, a
            list of several *end-of-sequence* tokens.
    """

    def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]):
        self.max_length = max_length
        # Store a list even when a single id is given.
        self.eos_token_id = [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # Trigger on the step that produces the final (max_length-th) token.
        if input_ids.shape[-1] == self.max_length - 1:
            vocab_size = scores.shape[1]
            non_eos = [tok for tok in range(vocab_size) if tok not in self.eos_token_id]
            scores[:, non_eos] = -float("inf")
            for eos in self.eos_token_id:
                scores[:, eos] = 0
        return scores
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
class InfNanRemoveLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that replaces `nan` scores with `0.0` and `+inf` scores with the dtype's largest finite
    value so the generation method cannot fail on them. It adds work to every step, so only enable it when
    actually needed.
    """

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # NaN is the only value that compares unequal to itself.
        scores[scores != scores] = 0.0

        # Clamp +inf down to the largest representable value for this dtype.
        scores[scores == float("inf")] = torch.finfo(scores.dtype).max

        return scores
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
class ExponentialDecayLengthPenalty(LogitsProcessor):
    r"""
    [`LogitsProcessor`] that exponentially scales the score of `eos_token_id` once generation has passed
    `regulation_start`, nudging sequences towards termination.

    Args:
        exponential_decay_length_penalty (`tuple(int, float)`):
            `(start_index, decay_factor)`: `start_index` is where the penalty starts (counted in generated
            tokens) and `decay_factor` is the base of the exponential scaling.
        eos_token_id (`Union[int, List[int]]`):
            The id of the *end-of-sequence* token. Optionally, a list of several *end-of-sequence* tokens.
        input_ids_seq_length (`int`):
            The length of the input (prompt) sequence.
    """

    def __init__(
        self,
        exponential_decay_length_penalty: Tuple[int, float],
        eos_token_id: Union[int, List[int]],
        input_ids_seq_length: int,
    ):
        start_index, decay_factor = exponential_decay_length_penalty
        # Offset by the prompt length so the threshold counts generated tokens.
        self.regulation_start = start_index + input_ids_seq_length
        self.regulation_factor = decay_factor
        self.eos_token_id = [eos_token_id] if isinstance(eos_token_id, int) else eos_token_id

    def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.FloatTensor:
        cur_len = input_ids.shape[-1]
        if cur_len > self.regulation_start:
            # Scaling grows exponentially with distance past the start index.
            boost = pow(self.regulation_factor, cur_len - self.regulation_start)
            for eos in self.eos_token_id:
                scores[:, eos] = scores[:, eos] * boost
        return scores
|
| 859 |
+
|
| 860 |
+
|
| 861 |
+
class LogitNormalization(LogitsProcessor, LogitsWarper):
    r"""
    [`LogitsWarper`] and [`LogitsProcessor`] that re-normalizes scores with log-softmax. Beam search compares
    hypotheses assuming normalized scores, but earlier processors/warpers may have broken that normalization, so
    this is applied as the last step.
    """

    def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
        return scores.log_softmax(dim=-1)
|
| 872 |
+
|
| 873 |
+
|
| 874 |
+
class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
    r"""
    [`SuppressTokensAtBeginLogitsProcessor`] suppresses the tokens in `begin_suppress_tokens` on the generation
    step where the sequence length equals `begin_index`, so those tokens cannot be sampled at the beginning of
    generation.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores):
        # Only the single step at `begin_index` is affected; later steps pass through.
        if input_ids.shape[1] == self.begin_index:
            scores[:, self.begin_suppress_tokens] = -float("inf")
        return scores
|
| 890 |
+
|
| 891 |
+
|
| 892 |
+
class SuppressTokensLogitsProcessor(LogitsProcessor):
    r"""Suppresses a fixed list of tokens at every step by setting their log probs to `-inf`, so they are never
    sampled."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores):
        scores[:, self.suppress_tokens] = -float("inf")
        return scores
|
| 902 |
+
|
| 903 |
+
|
| 904 |
+
class ForceTokensLogitsProcessor(LogitsProcessor):
    r"""Takes a list of `[generation_index, token_id]` pairs and forces the mapped token at each mapped
    generation index: every other token's log prob is set to `-inf` at that step so the forced token is always
    sampled."""

    def __init__(self, force_token_map: List[List[int]]):
        # Pairs become an index -> token lookup table.
        self.force_token_map = dict(force_token_map)

    def __call__(self, input_ids, scores):
        step = input_ids.shape[-1]
        forced_token = self.force_token_map.get(step, None)
        if forced_token is not None:
            # Zero out every alternative so the forced token wins deterministically.
            scores[:, :] = -float("inf")
            scores[:, forced_token] = 0
        return scores
|
| 919 |
+
|
| 920 |
+
|
| 921 |
+
class WhisperTimeStampLogitsProcessor(LogitsProcessor):
    r"""
    Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log
    probs to `inf` so that they are sampled at their corresponding index.

    Args:
        generate_config (`GenerateConfig`):
            The generate config used to generate the output. The following parameters are required:
                eos_token_id (`int`, *optional*, defaults to 50257):
                    The id of the *end-of-sequence* token.
                no_timestamps_token_id (`int`, *optional*, defaults to 50363):
                    The id of the `"<|notimestamps|>"` token.
                max_initial_timestamp_index (`int`, *optional*, defaults to 1):
                    Used to set the maximum value of the initial timestamp. This is used to prevent the model from
                    predicting timestamps that are too far in the future.
    """

    def __init__(self, generate_config):  # support for the kwargs
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # All token ids >= timestamp_begin are timestamp tokens; everything below is text.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        # First index at which sampled tokens start, after the forced decoder prompt.
        # NOTE(review): the "+ 2" presumably accounts for the start-of-transcript token
        # plus the first sampled position — confirm against the Whisper decoding setup.
        self.begin_index = len(generate_config.forced_decoder_ids) + 2
        if generate_config.forced_decoder_ids[-1][1] == self.no_timestamps_token_id:
            # The notimestamps token at the end of the forced ids is suppressed below,
            # so sampling effectively starts one position earlier.
            self.begin_index -= 1
        self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index

    def __call__(self, input_ids, scores):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores[:, self.no_timestamps_token_id] = -float("inf")

        # Very first sampled position: force the first timestamp token.
        if input_ids.shape[1] == self.begin_index - 1:
            scores[:, :] = -float("inf")
            scores[:, self.timestamp_begin] = 0
            return scores

        # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly
        for k in range(input_ids.shape[0]):
            # Only the sampled portion (after the forced prompt) matters here.
            seq = list(input_ids[k, self.begin_index :].tolist())
            last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin
            penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin

            if last_was_timestamp:
                if penultimate_was_timestamp:  # has to be non-timestamp
                    scores[k, self.timestamp_begin :] = -float("inf")
                else:  # cannot be normal text tokens
                    # NOTE(review): this relies on all text token ids being < eos_token_id,
                    # which holds for Whisper's vocabulary layout.
                    scores[k, : self.eos_token_id] = -float("inf")

        # apply the `max_initial_timestamp` option
        if input_ids.shape[1] == self.begin_index and self.max_initial_timestamp_index is not None:
            last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
            scores[:, last_allowed + 1 :] = -float("inf")

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = torch.nn.functional.log_softmax(scores.float(), dim=-1)
        for k in range(input_ids.shape[0]):
            # Total timestamp mass vs. the single best text token, in log space.
            timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
            max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
            if timestamp_logprob > max_text_token_logprob:
                scores[k, : self.timestamp_begin] = -float("inf")

        return scores
|
valley/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import warnings
|
| 3 |
+
from abc import ABC
|
| 4 |
+
from copy import deepcopy
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from ..utils import add_start_docstrings
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
|
| 13 |
+
Args:
|
| 14 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 15 |
+
Indices of input sequence tokens in the vocabulary.
|
| 16 |
+
|
| 17 |
+
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 18 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 19 |
+
|
| 20 |
+
[What are input IDs?](../glossary#input-ids)
|
| 21 |
+
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
|
| 22 |
+
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
|
| 23 |
+
or scores for each vocabulary token after SoftMax.
|
| 24 |
+
kwargs:
|
| 25 |
+
Additional stopping criteria specific kwargs.
|
| 26 |
+
|
| 27 |
+
Return:
|
| 28 |
+
`bool`. `False` indicates we should continue, `True` indicates we should stop.
|
| 29 |
+
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Subclasses must override; `**kwargs` lets callers pass criterion-specific extras.
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class MaxLengthCriteria(StoppingCriteria):
    """
    Stops generation once the full generated sequence reaches `max_length` tokens. For decoder-only transformers
    this count includes the initial prompt tokens.

    Args:
        max_length (`int`):
            The maximum length that the output sequence can have in number of tokens.
    """

    def __init__(self, max_length: int):
        self.max_length = max_length

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        current_length = input_ids.shape[-1]
        return current_length >= self.max_length
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class MaxNewTokensCriteria(StoppingCriteria):
    """
    Deprecated: stops generation once `max_new_tokens` tokens have been generated beyond the initial
    `start_length` prompt tokens. Prefer `MaxLengthCriteria(max_length=start_length + max_new_tokens)`.

    Args:
        start_length (`int`):
            The number of initial tokens.
        max_new_tokens (`int`):
            The maximum number of tokens to generate.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        # Emit the deprecation notice at construction time.
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return self.max_length <= input_ids.shape[-1]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class MaxTimeCriteria(StoppingCriteria):
    """
    Stops generation once wall-clock time since `initial_timestamp` exceeds `max_time` seconds. By default the
    clock starts when this object is created; pass `initial_timestamp` to override that.

    Args:
        max_time (`float`):
            The maximum allowed time in seconds for the generation.
        initial_timestamp (`float`, *optional*, defaults to `time.time()`):
            The start of the generation allowed time.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = initial_timestamp if initial_timestamp is not None else time.time()

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        elapsed = time.time() - self.initial_timestamp
        return elapsed > self.max_time
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class StoppingCriteriaList(list):
    """A plain `list` of criteria; generation stops as soon as any one of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for criteria in self:
            if criteria(input_ids, scores):
                return True
        return False

    @property
    def max_length(self) -> Optional[int]:
        # The first length-bound criterion wins; `None` means no length limit is set.
        for stopping_criterium in self:
            if isinstance(stopping_criterium, (MaxLengthCriteria, MaxNewTokensCriteria)):
                return stopping_criterium.max_length
        return None
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    If the list already carries a different length bound, warn without overriding it;
    if it carries none, append a `MaxLengthCriteria`. The input list is never mutated.
    """
    new_stopping_criteria = deepcopy(stopping_criteria)
    stopping_max_length = stopping_criteria.max_length
    if stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    elif stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    return new_stopping_criteria
|
valley/lib/python3.10/site-packages/transformers/onnx/__main__.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import subprocess
|
| 15 |
+
import sys
|
| 16 |
+
import warnings
|
| 17 |
+
from argparse import ArgumentParser
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
|
| 20 |
+
from packaging import version
|
| 21 |
+
|
| 22 |
+
from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer
|
| 23 |
+
from ..utils import logging
|
| 24 |
+
from ..utils.import_utils import is_optimum_available
|
| 25 |
+
from .convert import export, validate_model_outputs
|
| 26 |
+
from .features import FeaturesManager
|
| 27 |
+
from .utils import get_preprocessor
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Minimum `optimum` version that ships the ONNX exporters used by `export_with_optimum`.
MIN_OPTIMUM_VERSION = "1.5.0"

# Model kinds whose encoder and decoder must be exported as two separate ONNX graphs.
ENCODER_DECODER_MODELS = ["vision-encoder-decoder"]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def export_with_optimum(args):
    """Delegate the ONNX export to `optimum.exporters.onnx`, run as a subprocess.

    Requires `optimum >= MIN_OPTIMUM_VERSION` to be installed; raises `RuntimeError`
    otherwise. `args` is the parsed CLI namespace (`model`, `feature`, `framework`,
    `output`).
    """
    if is_optimum_available():
        from optimum.version import __version__ as optimum_version

        parsed_optimum_version = version.parse(optimum_version)
        if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION):
            raise RuntimeError(
                f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You "
                "can upgrade optimum by running: pip install -U optimum[exporters]"
            )
    else:
        raise RuntimeError(
            "transformers.onnx requires optimum to run, you can install the library by running: pip install "
            "optimum[exporters]"
        )
    # Each list element may contain an embedded space (e.g. "--model X"); the
    # join below flattens them into one shell command line.
    cmd_line = [
        sys.executable,
        "-m",
        "optimum.exporters.onnx",
        f"--model {args.model}",
        f"--task {args.feature}",
        f"--framework {args.framework}" if args.framework is not None else "",
        f"{args.output}",
    ]
    # NOTE(review): `shell=True` with interpolated CLI arguments is shell-injection
    # prone if `args.model`/`args.output` ever come from untrusted input; a list
    # argv with shell=False would be safer. Also, the subprocess return code is
    # not checked, so a failed export is reported as success.
    proc = subprocess.Popen(" ".join(cmd_line), stdout=subprocess.PIPE, shell=True)
    proc.wait()

    # NOTE(review): `logger` is not defined in the visible imports of this module —
    # presumably it is created elsewhere (e.g. in `main()`); verify before calling
    # this function standalone.
    logger.info(
        "The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as "
        "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: "
        "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model."
    )
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def export_with_transformers(args):
    """Export a model to ONNX using the (deprecated) in-tree transformers.onnx machinery.

    Mutates `args` in place: `output` is normalized to a file path, and `opset`/`atol`
    are filled in from the model's ONNX config when left unset on the command line.

    Args:
        args: Parsed CLI namespace with `model`, `feature`, `framework`, `opset`,
            `atol`, `output`, `cache_dir` and `preprocessor` attributes.

    Raises:
        ValueError: If the requested opset is lower than the minimum the model's ONNX
            config requires, or if `args.preprocessor` is not a known preprocessor type.
    """
    # Normalize the output target: a directory argument becomes <dir>/model.onnx.
    args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx")
    if not args.output.parent.exists():
        args.output.parent.mkdir(parents=True)

    # Allocate the model
    model = FeaturesManager.get_model_from_feature(
        args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir
    )

    model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature)
    onnx_config = model_onnx_config(model.config)

    if model_kind in ENCODER_DECODER_MODELS:
        # Encoder/decoder models are exported as two separate ONNX graphs.
        encoder_model = model.get_encoder()
        decoder_model = model.get_decoder()

        encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config)
        decoder_onnx_config = onnx_config.get_decoder_config(
            encoder_model.config, decoder_model.config, feature=args.feature
        )

        # Default to the highest opset either half needs; reject anything below the
        # lower of the two minimums.
        if args.opset is None:
            args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)

        if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset):
            raise ValueError(
                f"Opset {args.opset} is not sufficient to export {model_kind}. At least "
                f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required."
            )

        # The encoder side is exported with a feature extractor as preprocessor...
        preprocessor = AutoFeatureExtractor.from_pretrained(args.model)

        onnx_inputs, onnx_outputs = export(
            preprocessor,
            encoder_model,
            encoder_onnx_config,
            args.opset,
            args.output.parent.joinpath("encoder_model.onnx"),
        )

        validate_model_outputs(
            encoder_onnx_config,
            preprocessor,
            encoder_model,
            args.output.parent.joinpath("encoder_model.onnx"),
            onnx_outputs,
            args.atol if args.atol else encoder_onnx_config.atol_for_validation,
        )

        # ...while the decoder side uses a tokenizer.
        preprocessor = AutoTokenizer.from_pretrained(args.model)

        onnx_inputs, onnx_outputs = export(
            preprocessor,
            decoder_model,
            decoder_onnx_config,
            args.opset,
            args.output.parent.joinpath("decoder_model.onnx"),
        )

        validate_model_outputs(
            decoder_onnx_config,
            preprocessor,
            decoder_model,
            args.output.parent.joinpath("decoder_model.onnx"),
            onnx_outputs,
            args.atol if args.atol else decoder_onnx_config.atol_for_validation,
        )
        logger.info(
            f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()},"
            f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}"
        )

    else:
        # Instantiate the appropriate preprocessor
        if args.preprocessor == "auto":
            preprocessor = get_preprocessor(args.model)
        elif args.preprocessor == "tokenizer":
            preprocessor = AutoTokenizer.from_pretrained(args.model)
        elif args.preprocessor == "feature_extractor":
            preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
        elif args.preprocessor == "processor":
            preprocessor = AutoProcessor.from_pretrained(args.model)
        else:
            raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'")

        # Ensure the requested opset is sufficient
        if args.opset is None:
            args.opset = onnx_config.default_onnx_opset

        if args.opset < onnx_config.default_onnx_opset:
            raise ValueError(
                f"Opset {args.opset} is not sufficient to export {model_kind}. "
                f"At least {onnx_config.default_onnx_opset} is required."
            )

        onnx_inputs, onnx_outputs = export(
            preprocessor,
            model,
            onnx_config,
            args.opset,
            args.output,
        )

        if args.atol is None:
            args.atol = onnx_config.atol_for_validation

        validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol)
        logger.info(f"All good, model saved at: {args.output.as_posix()}")
        warnings.warn(
            "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend"
            " using optimum.exporters.onnx in future. You can find more information here:"
            " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.",
            FutureWarning,
        )
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def _build_parser() -> ArgumentParser:
    """Construct the argument parser for the `transformers.onnx` CLI."""
    parser = ArgumentParser("Hugging Face Transformers ONNX exporter")
    parser.add_argument(
        "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from."
    )
    parser.add_argument(
        "--feature",
        default="default",
        help="The type of features to export the model with.",
    )
    parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.")
    parser.add_argument(
        "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model."
    )
    parser.add_argument(
        "--framework",
        type=str,
        choices=["pt", "tf"],
        default=None,
        help=(
            "The framework to use for the ONNX export."
            " If not provided, will attempt to use the local checkpoint's original framework"
            " or what is available in the environment."
        ),
    )
    parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.")
    parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.")
    parser.add_argument(
        "--preprocessor",
        type=str,
        choices=["auto", "tokenizer", "feature_extractor", "processor"],
        default="auto",
        help="Which type of preprocessor to use. 'auto' tries to automatically detect it.",
    )
    parser.add_argument(
        "--export_with_transformers",
        action="store_true",
        help=(
            "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be "
            "useful when exporting a model supported in transformers but not in optimum, otherwise it is not "
            "recommended."
        ),
    )
    return parser


def main():
    """CLI entry point: parse the arguments, then prefer the optimum-based exporter when usable."""
    args = _build_parser().parse_args()
    # Fall back to the in-tree exporter when explicitly requested or when optimum is absent.
    if args.export_with_transformers or not is_optimum_available():
        export_with_transformers(args)
    else:
        export_with_optimum(args)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
# Script entry point: set up a dedicated, INFO-level logger for the exporter before
# dispatching to `main()`.
if __name__ == "__main__":
    logger = logging.get_logger("transformers.onnx")  # pylint: disable=invalid-name
    logger.setLevel(logging.INFO)
    main()
|
valley/lib/python3.10/site-packages/transformers/onnx/convert.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import warnings
|
| 16 |
+
from inspect import signature
|
| 17 |
+
from itertools import chain
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
from typing import TYPE_CHECKING, Iterable, List, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
from packaging.version import Version, parse
|
| 23 |
+
|
| 24 |
+
from ..tokenization_utils_base import PreTrainedTokenizerBase
|
| 25 |
+
from ..utils import (
|
| 26 |
+
TensorType,
|
| 27 |
+
is_tf_available,
|
| 28 |
+
is_torch_available,
|
| 29 |
+
logging,
|
| 30 |
+
)
|
| 31 |
+
from .config import OnnxConfig
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
if is_torch_available():
|
| 35 |
+
from ..modeling_utils import PreTrainedModel
|
| 36 |
+
from ..pytorch_utils import is_torch_less_than_1_11
|
| 37 |
+
|
| 38 |
+
if is_tf_available():
|
| 39 |
+
from ..modeling_tf_utils import TFPreTrainedModel
|
| 40 |
+
|
| 41 |
+
if TYPE_CHECKING:
|
| 42 |
+
from ..feature_extraction_utils import FeatureExtractionMixin
|
| 43 |
+
from ..processing_utils import ProcessorMixin
|
| 44 |
+
from ..tokenization_utils import PreTrainedTokenizer
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Module-level logger shared by every export/validation helper in this file.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# This is the minimal required version to support some ONNX Runtime features
ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def check_onnxruntime_requirements(minimum_version: Version):
    """
    Check onnxruntime is installed and if the installed version match is recent enough

    Args:
        minimum_version (`Version`): The minimum acceptable onnxruntime version.

    Raises:
        ImportError: If onnxruntime is not installed or too old version is found
    """
    # Keep the try body minimal: previously the "version too old" ImportError was raised
    # *inside* this try, so the except below caught it and replaced it with the misleading
    # "not installed" message.
    try:
        import onnxruntime
    except ImportError:
        raise ImportError(
            "onnxruntime doesn't seem to be currently installed. "
            "Please install the onnxruntime by running `pip install onnxruntime`"
            " and relaunch the conversion."
        )

    # Parse the version of the installed onnxruntime
    ort_version = parse(onnxruntime.__version__)

    # Compare against the caller-provided minimum. The previous code compared against the
    # module constant ORT_QUANTIZE_MINIMUM_VERSION, silently ignoring `minimum_version`
    # even though the error message quoted it.
    if ort_version < minimum_version:
        raise ImportError(
            f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
            f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
            "Please update onnxruntime by running `pip install --upgrade onnxruntime`"
        )
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def export_pytorch(
    preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
    model: "PreTrainedModel",
    config: OnnxConfig,
    opset: int,
    output: Path,
    tokenizer: "PreTrainedTokenizer" = None,
    device: str = "cpu",
) -> Tuple[List[str], List[str]]:
    """
    Export a PyTorch model to an ONNX Intermediate Representation (IR)

    Args:
        preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):
            The preprocessor used for encoding the data.
        model ([`PreTrainedModel`]):
            The model to export.
        config ([`~onnx.config.OnnxConfig`]):
            The ONNX configuration associated with the exported model.
        opset (`int`):
            The version of the ONNX operator set to use.
        output (`Path`):
            Directory to store the exported ONNX model.
        tokenizer ([`PreTrainedTokenizer`], *optional*):
            Deprecated alias for `preprocessor`; if given (and `preprocessor` is not a
            tokenizer), it replaces `preprocessor` and a `FutureWarning` is emitted.
        device (`str`, *optional*, defaults to `cpu`):
            The device on which the ONNX model will be exported. Either `cpu` or `cuda`.

    Returns:
        `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
        the ONNX configuration.
    """

    # Handle the deprecated `tokenizer` argument: reject ambiguous calls, otherwise fold it
    # into `preprocessor`.
    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.")
    if tokenizer is not None:
        warnings.warn(
            "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
            " `preprocessor` instead.",
            FutureWarning,
        )
        logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
        preprocessor = tokenizer

    if issubclass(type(model), PreTrainedModel):
        import torch
        from torch.onnx import export as onnx_export

        logger.info(f"Using framework PyTorch: {torch.__version__}")
        # Run the whole export in inference mode: no autograd graph is needed for tracing.
        with torch.no_grad():
            model.config.return_dict = True
            model.eval()

            # Check if we need to override certain configuration item
            if config.values_override is not None:
                logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
                for override_config_key, override_config_value in config.values_override.items():
                    logger.info(f"\t- {override_config_key} -> {override_config_value}")
                    setattr(model.config, override_config_key, override_config_value)

            # Ensure inputs match
            # TODO: Check when exporting QA we provide "is_pair=True"
            model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH)
            device = torch.device(device)
            if device.type == "cuda" and torch.cuda.is_available():
                model.to(device)
                # Move every dummy tensor to the target device, preserving the nesting used
                # for past_key_values (tuples and lists of tuples of tensors).
                model_inputs_device = {}
                for k, v in model_inputs.items():
                    if isinstance(v, Tuple):
                        model_inputs_device[k] = tuple(
                            x.to(device) if isinstance(x, torch.Tensor) else None for x in v
                        )
                    elif isinstance(v, List):
                        model_inputs_device[k] = [
                            tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v
                        ]
                    else:
                        model_inputs_device[k] = v.to(device)

                model_inputs = model_inputs_device

            inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
            onnx_outputs = list(config.outputs.keys())

            if not inputs_match:
                raise ValueError("Model and config inputs doesn't match")

            # Temporarily swap in ONNX-exportable implementations of unsupported ops;
            # restored after the export below.
            config.patch_ops()

            # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
            # so we check the torch version for backwards compatibility
            if is_torch_less_than_1_11:
                # export can work with named args but the dict containing named args
                # has to be the last element of the args tuple.
                try:
                    onnx_export(
                        model,
                        (model_inputs,),
                        f=output.as_posix(),
                        input_names=list(config.inputs.keys()),
                        output_names=onnx_outputs,
                        dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
                        do_constant_folding=True,
                        use_external_data_format=config.use_external_data_format(model.num_parameters()),
                        enable_onnx_checker=True,
                        opset_version=opset,
                    )
                except RuntimeError as err:
                    # Rewrite torch's 2GB-protobuf error with a hint to upgrade torch;
                    # any other RuntimeError is re-raised untouched.
                    message = str(err)
                    if (
                        message
                        == "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export without"
                        " setting use_external_data_format parameter."
                    ):
                        message = (
                            "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export"
                            " without setting use_external_data_format parameter or try with torch 1.10+."
                        )
                        raise RuntimeError(message)
                    else:
                        raise err
            else:
                onnx_export(
                    model,
                    (model_inputs,),
                    f=output.as_posix(),
                    input_names=list(config.inputs.keys()),
                    output_names=onnx_outputs,
                    dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
                    do_constant_folding=True,
                    opset_version=opset,
                )

            config.restore_ops()

    return matched_inputs, onnx_outputs
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def export_tensorflow(
    preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"],
    model: "TFPreTrainedModel",
    config: OnnxConfig,
    opset: int,
    output: Path,
    tokenizer: "PreTrainedTokenizer" = None,
) -> Tuple[List[str], List[str]]:
    """
    Export a TensorFlow model to an ONNX Intermediate Representation (IR)

    Args:
        preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]):
            The preprocessor used for encoding the data.
        model ([`TFPreTrainedModel`]):
            The model to export.
        config ([`~onnx.config.OnnxConfig`]):
            The ONNX configuration associated with the exported model.
        opset (`int`):
            The version of the ONNX operator set to use.
        output (`Path`):
            Directory to store the exported ONNX model.
        tokenizer ([`PreTrainedTokenizer`], *optional*):
            Deprecated alias for `preprocessor`; if given (and `preprocessor` is not a
            tokenizer), it replaces `preprocessor` and a `FutureWarning` is emitted.

    Returns:
        `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
        the ONNX configuration.
    """
    # tf2onnx (and onnx itself) are only needed for the TF path, hence the local imports.
    import onnx
    import tensorflow as tf
    import tf2onnx

    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.")
    if tokenizer is not None:
        warnings.warn(
            "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
            " `preprocessor` instead.",
            FutureWarning,
        )
        logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
        preprocessor = tokenizer

    model.config.return_dict = True

    # Check if we need to override certain configuration item
    if config.values_override is not None:
        logger.info(f"Overriding {len(config.values_override)} configuration item(s)")
        for override_config_key, override_config_value in config.values_override.items():
            logger.info(f"\t- {override_config_key} -> {override_config_value}")
            setattr(model.config, override_config_key, override_config_value)

    # Ensure inputs match
    model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW)
    inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys())
    onnx_outputs = list(config.outputs.keys())

    # All axes are declared dynamic ([None] * ndim); tf2onnx infers the graph from this
    # signature instead of the concrete dummy shapes.
    input_signature = [
        tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items()
    ]
    onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset)
    onnx.save(onnx_model, output.as_posix())
    config.restore_ops()

    return matched_inputs, onnx_outputs
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def export(
    preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
    model: Union["PreTrainedModel", "TFPreTrainedModel"],
    config: OnnxConfig,
    opset: int,
    output: Path,
    tokenizer: "PreTrainedTokenizer" = None,
    device: str = "cpu",
) -> Tuple[List[str], List[str]]:
    """
    Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR)

    Args:
        preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]):
            The preprocessor used for encoding the data.
        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
            The model to export.
        config ([`~onnx.config.OnnxConfig`]):
            The ONNX configuration associated with the exported model.
        opset (`int`):
            The version of the ONNX operator set to use.
        output (`Path`):
            Directory to store the exported ONNX model.
        tokenizer ([`PreTrainedTokenizer`], *optional*):
            Deprecated alias for `preprocessor`; if given (and `preprocessor` is not a
            tokenizer), it replaces `preprocessor` and a `FutureWarning` is emitted.
        device (`str`, *optional*, defaults to `cpu`):
            The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for
            export on CUDA devices.

    Returns:
        `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from
        the ONNX configuration.
    """
    if not (is_torch_available() or is_tf_available()):
        # Fixed double negative in the original message ("are not installed").
        raise ImportError(
            "Cannot convert because neither PyTorch nor TensorFlow is installed. "
            "Please install torch or tensorflow first."
        )

    if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda":
        raise RuntimeError("`tf2onnx` does not support export on CUDA device.")

    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.")
    if tokenizer is not None:
        warnings.warn(
            "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
            " `preprocessor` instead.",
            FutureWarning,
        )
        logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
        preprocessor = tokenizer
        # `tokenizer` has been folded into `preprocessor`; forwarding it as well would trip
        # the same both-arguments guard in export_pytorch/export_tensorflow and raise.
        tokenizer = None

    if is_torch_available():
        from ..utils import torch_version

        if not config.is_torch_support_available:
            logger.warning(
                f"Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version},"
                f" got: {torch_version}"
            )

    if is_torch_available() and issubclass(type(model), PreTrainedModel):
        return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device)
    elif is_tf_available() and issubclass(type(model), TFPreTrainedModel):
        return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)

    # Previously this fell off the end and implicitly returned None, which made callers fail
    # later with an opaque tuple-unpacking error; report the real problem instead.
    raise ValueError(
        f"Cannot export a model of type {type(model).__name__}: it matches neither the available PyTorch backend"
        " nor the available TensorFlow backend."
    )
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def validate_model_outputs(
    config: OnnxConfig,
    preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"],
    reference_model: Union["PreTrainedModel", "TFPreTrainedModel"],
    onnx_model: Path,
    onnx_named_outputs: List[str],
    atol: float,
    tokenizer: "PreTrainedTokenizer" = None,
):
    """
    Validate an exported ONNX model against its reference transformers model.

    Runs both the reference model and the ONNX model (via onnxruntime, CPU provider) on
    freshly generated dummy inputs and checks that every named ONNX output exists in the
    reference outputs, has the same shape, and matches element-wise within `atol`.

    Args:
        config ([`~onnx.config.OnnxConfig`]): The ONNX configuration used for the export.
        preprocessor: The preprocessor used to generate the dummy inputs.
        reference_model: The original PyTorch or TensorFlow model.
        onnx_model (`Path`): Path to the exported ONNX file.
        onnx_named_outputs (`List[str]`): The output names produced by the export step.
        atol (`float`): Absolute tolerance for the element-wise comparison.
        tokenizer: Deprecated alias for `preprocessor`.

    Raises:
        ValueError: If output names, shapes, or values do not match within tolerance,
            or if both `tokenizer` and a tokenizer `preprocessor` are given.
    """
    from onnxruntime import InferenceSession, SessionOptions

    logger.info("Validating ONNX model...")

    if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
        raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.")
    if tokenizer is not None:
        warnings.warn(
            "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
            " `preprocessor` instead.",
            FutureWarning,
        )
        logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
        preprocessor = tokenizer

    # generate inputs with a different batch_size and seq_len that was used for conversion to properly test
    # dynamic input shapes.
    if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
        reference_model_inputs = config.generate_dummy_inputs(
            preprocessor,
            batch_size=config.default_fixed_batch + 1,
            seq_length=config.default_fixed_sequence + 1,
            framework=TensorType.PYTORCH,
        )
    else:
        reference_model_inputs = config.generate_dummy_inputs(
            preprocessor,
            batch_size=config.default_fixed_batch + 1,
            seq_length=config.default_fixed_sequence + 1,
            framework=TensorType.TENSORFLOW,
        )

    # Create ONNX Runtime session
    options = SessionOptions()
    session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"])

    # Compute outputs from the reference model
    if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
        reference_model.to("cpu")
    ref_outputs = reference_model(**reference_model_inputs)
    ref_outputs_dict = {}

    # We flatten potential collection of outputs (i.e. past_keys) to a flat structure
    for name, value in ref_outputs.items():
        # Overwriting the output name as "present" since it is the name used for the ONNX outputs
        # ("past_key_values" being taken for the ONNX inputs)
        if name == "past_key_values":
            name = "present"
        if isinstance(value, (list, tuple)):
            value = config.flatten_output_collection_property(name, value)
            ref_outputs_dict.update(value)
        else:
            ref_outputs_dict[name] = value

    # Create onnxruntime inputs from the reference model inputs
    reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs)

    # We flatten potential collection of inputs (i.e. past_keys)
    onnx_inputs = {}
    for name, value in reference_model_inputs_onnxruntime.items():
        if isinstance(value, (list, tuple)):
            value = config.flatten_output_collection_property(name, value)
            onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()})
        else:
            onnx_inputs[name] = value.numpy()

    # Compute outputs from the ONNX model
    onnx_outputs = session.run(onnx_named_outputs, onnx_inputs)

    # Check we have a subset of the keys into onnx_outputs against ref_outputs
    ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs)
    if not onnx_outputs_set.issubset(ref_outputs_set):
        logger.info(
            f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}"
        )

        raise ValueError(
            "Outputs doesn't match between reference model and ONNX exported model: "
            f"{onnx_outputs_set.difference(ref_outputs_set)}"
        )
    else:
        logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})")

    # Check the shape and values match
    for name, ort_value in zip(onnx_named_outputs, onnx_outputs):
        if is_torch_available() and issubclass(type(reference_model), PreTrainedModel):
            ref_value = ref_outputs_dict[name].detach().numpy()
        else:
            ref_value = ref_outputs_dict[name].numpy()
        logger.info(f'\t- Validating ONNX Model output "{name}":')

        # Shape
        if not ort_value.shape == ref_value.shape:
            logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}")
            raise ValueError(
                "Outputs shape doesn't match between reference model and ONNX exported model: "
                f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)"
            )
        else:
            logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}")

        # Values
        if not np.allclose(ref_value, ort_value, atol=atol):
            bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol))
            logger.info(f"\t\t-[x] values not close enough (atol: {atol})")
            raise ValueError(
                "Outputs values doesn't match between reference model and ONNX exported model: "
                f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for "
                f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}"
            )
        else:
            logger.info(f"\t\t-[✓] all values close (atol: {atol})")
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
def ensure_model_and_config_inputs_match(
|
| 475 |
+
model: Union["PreTrainedModel", "TFPreTrainedModel"], model_inputs: Iterable[str]
|
| 476 |
+
) -> Tuple[bool, List[str]]:
|
| 477 |
+
"""
|
| 478 |
+
|
| 479 |
+
:param model_inputs: :param config_inputs: :return:
|
| 480 |
+
"""
|
| 481 |
+
if is_torch_available() and issubclass(type(model), PreTrainedModel):
|
| 482 |
+
forward_parameters = signature(model.forward).parameters
|
| 483 |
+
else:
|
| 484 |
+
forward_parameters = signature(model.call).parameters
|
| 485 |
+
model_inputs_set = set(model_inputs)
|
| 486 |
+
|
| 487 |
+
# We are fine if config_inputs has more keys than model_inputs
|
| 488 |
+
forward_inputs_set = set(forward_parameters.keys())
|
| 489 |
+
is_ok = model_inputs_set.issubset(forward_inputs_set)
|
| 490 |
+
|
| 491 |
+
# Make sure the input order match (VERY IMPORTANT !!!!)
|
| 492 |
+
matching_inputs = forward_inputs_set.intersection(model_inputs_set)
|
| 493 |
+
ordered_inputs = [parameter for parameter in forward_parameters.keys() if parameter in matching_inputs]
|
| 494 |
+
return is_ok, ordered_inputs
|
valley/lib/python3.10/site-packages/transformers/pipelines/__init__.py
ADDED
|
@@ -0,0 +1,976 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import io
|
| 16 |
+
import json
|
| 17 |
+
import os
|
| 18 |
+
import warnings
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
from huggingface_hub import model_info
|
| 23 |
+
from numpy import isin
|
| 24 |
+
|
| 25 |
+
from ..configuration_utils import PretrainedConfig
|
| 26 |
+
from ..dynamic_module_utils import get_class_from_dynamic_module
|
| 27 |
+
from ..feature_extraction_utils import PreTrainedFeatureExtractor
|
| 28 |
+
from ..image_processing_utils import BaseImageProcessor
|
| 29 |
+
from ..models.auto.configuration_auto import AutoConfig
|
| 30 |
+
from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
|
| 31 |
+
from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
|
| 32 |
+
from ..models.auto.modeling_auto import AutoModelForDepthEstimation
|
| 33 |
+
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
|
| 34 |
+
from ..tokenization_utils import PreTrainedTokenizer
|
| 35 |
+
from ..tokenization_utils_fast import PreTrainedTokenizerFast
|
| 36 |
+
from ..utils import (
|
| 37 |
+
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
|
| 38 |
+
is_kenlm_available,
|
| 39 |
+
is_offline_mode,
|
| 40 |
+
is_pyctcdecode_available,
|
| 41 |
+
is_tf_available,
|
| 42 |
+
is_torch_available,
|
| 43 |
+
logging,
|
| 44 |
+
)
|
| 45 |
+
from .audio_classification import AudioClassificationPipeline
|
| 46 |
+
from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline
|
| 47 |
+
from .base import (
|
| 48 |
+
ArgumentHandler,
|
| 49 |
+
CsvPipelineDataFormat,
|
| 50 |
+
JsonPipelineDataFormat,
|
| 51 |
+
PipedPipelineDataFormat,
|
| 52 |
+
Pipeline,
|
| 53 |
+
PipelineDataFormat,
|
| 54 |
+
PipelineException,
|
| 55 |
+
PipelineRegistry,
|
| 56 |
+
get_default_model_and_revision,
|
| 57 |
+
infer_framework_load_model,
|
| 58 |
+
)
|
| 59 |
+
from .conversational import Conversation, ConversationalPipeline
|
| 60 |
+
from .depth_estimation import DepthEstimationPipeline
|
| 61 |
+
from .document_question_answering import DocumentQuestionAnsweringPipeline
|
| 62 |
+
from .feature_extraction import FeatureExtractionPipeline
|
| 63 |
+
from .fill_mask import FillMaskPipeline
|
| 64 |
+
from .image_classification import ImageClassificationPipeline
|
| 65 |
+
from .image_segmentation import ImageSegmentationPipeline
|
| 66 |
+
from .image_to_text import ImageToTextPipeline
|
| 67 |
+
from .object_detection import ObjectDetectionPipeline
|
| 68 |
+
from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline
|
| 69 |
+
from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline
|
| 70 |
+
from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
|
| 71 |
+
from .text_classification import TextClassificationPipeline
|
| 72 |
+
from .text_generation import TextGenerationPipeline
|
| 73 |
+
from .token_classification import (
|
| 74 |
+
AggregationStrategy,
|
| 75 |
+
NerPipeline,
|
| 76 |
+
TokenClassificationArgumentHandler,
|
| 77 |
+
TokenClassificationPipeline,
|
| 78 |
+
)
|
| 79 |
+
from .video_classification import VideoClassificationPipeline
|
| 80 |
+
from .visual_question_answering import VisualQuestionAnsweringPipeline
|
| 81 |
+
from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline
|
| 82 |
+
from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline
|
| 83 |
+
from .zero_shot_image_classification import ZeroShotImageClassificationPipeline
|
| 84 |
+
from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
if is_tf_available():
|
| 88 |
+
import tensorflow as tf
|
| 89 |
+
|
| 90 |
+
from ..models.auto.modeling_tf_auto import (
|
| 91 |
+
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
|
| 92 |
+
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 93 |
+
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
|
| 94 |
+
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
|
| 95 |
+
TF_MODEL_WITH_LM_HEAD_MAPPING,
|
| 96 |
+
TFAutoModel,
|
| 97 |
+
TFAutoModelForCausalLM,
|
| 98 |
+
TFAutoModelForImageClassification,
|
| 99 |
+
TFAutoModelForMaskedLM,
|
| 100 |
+
TFAutoModelForQuestionAnswering,
|
| 101 |
+
TFAutoModelForSeq2SeqLM,
|
| 102 |
+
TFAutoModelForSequenceClassification,
|
| 103 |
+
TFAutoModelForTableQuestionAnswering,
|
| 104 |
+
TFAutoModelForTokenClassification,
|
| 105 |
+
TFAutoModelForVision2Seq,
|
| 106 |
+
TFAutoModelForZeroShotImageClassification,
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
if is_torch_available():
|
| 110 |
+
import torch
|
| 111 |
+
|
| 112 |
+
from ..models.auto.modeling_auto import (
|
| 113 |
+
MODEL_FOR_MASKED_LM_MAPPING,
|
| 114 |
+
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
|
| 115 |
+
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 116 |
+
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
|
| 117 |
+
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
|
| 118 |
+
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
|
| 119 |
+
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
|
| 120 |
+
AutoModel,
|
| 121 |
+
AutoModelForAudioClassification,
|
| 122 |
+
AutoModelForCausalLM,
|
| 123 |
+
AutoModelForCTC,
|
| 124 |
+
AutoModelForDocumentQuestionAnswering,
|
| 125 |
+
AutoModelForImageClassification,
|
| 126 |
+
AutoModelForImageSegmentation,
|
| 127 |
+
AutoModelForMaskedLM,
|
| 128 |
+
AutoModelForObjectDetection,
|
| 129 |
+
AutoModelForQuestionAnswering,
|
| 130 |
+
AutoModelForSemanticSegmentation,
|
| 131 |
+
AutoModelForSeq2SeqLM,
|
| 132 |
+
AutoModelForSequenceClassification,
|
| 133 |
+
AutoModelForSpeechSeq2Seq,
|
| 134 |
+
AutoModelForTableQuestionAnswering,
|
| 135 |
+
AutoModelForTokenClassification,
|
| 136 |
+
AutoModelForVideoClassification,
|
| 137 |
+
AutoModelForVision2Seq,
|
| 138 |
+
AutoModelForVisualQuestionAnswering,
|
| 139 |
+
AutoModelForZeroShotImageClassification,
|
| 140 |
+
AutoModelForZeroShotObjectDetection,
|
| 141 |
+
)
|
| 142 |
+
if TYPE_CHECKING:
|
| 143 |
+
from ..modeling_tf_utils import TFPreTrainedModel
|
| 144 |
+
from ..modeling_utils import PreTrainedModel
|
| 145 |
+
|
| 146 |
+
logger = logging.get_logger(__name__)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Register all the supported tasks here
|
| 150 |
+
TASK_ALIASES = {
|
| 151 |
+
"sentiment-analysis": "text-classification",
|
| 152 |
+
"ner": "token-classification",
|
| 153 |
+
"vqa": "visual-question-answering",
|
| 154 |
+
}
|
| 155 |
+
SUPPORTED_TASKS = {
|
| 156 |
+
"audio-classification": {
|
| 157 |
+
"impl": AudioClassificationPipeline,
|
| 158 |
+
"tf": (),
|
| 159 |
+
"pt": (AutoModelForAudioClassification,) if is_torch_available() else (),
|
| 160 |
+
"default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}},
|
| 161 |
+
"type": "audio",
|
| 162 |
+
},
|
| 163 |
+
"automatic-speech-recognition": {
|
| 164 |
+
"impl": AutomaticSpeechRecognitionPipeline,
|
| 165 |
+
"tf": (),
|
| 166 |
+
"pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (),
|
| 167 |
+
"default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}},
|
| 168 |
+
"type": "multimodal",
|
| 169 |
+
},
|
| 170 |
+
"feature-extraction": {
|
| 171 |
+
"impl": FeatureExtractionPipeline,
|
| 172 |
+
"tf": (TFAutoModel,) if is_tf_available() else (),
|
| 173 |
+
"pt": (AutoModel,) if is_torch_available() else (),
|
| 174 |
+
"default": {"model": {"pt": ("distilbert-base-cased", "935ac13"), "tf": ("distilbert-base-cased", "935ac13")}},
|
| 175 |
+
"type": "multimodal",
|
| 176 |
+
},
|
| 177 |
+
"text-classification": {
|
| 178 |
+
"impl": TextClassificationPipeline,
|
| 179 |
+
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
|
| 180 |
+
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
|
| 181 |
+
"default": {
|
| 182 |
+
"model": {
|
| 183 |
+
"pt": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
|
| 184 |
+
"tf": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
|
| 185 |
+
},
|
| 186 |
+
},
|
| 187 |
+
"type": "text",
|
| 188 |
+
},
|
| 189 |
+
"token-classification": {
|
| 190 |
+
"impl": TokenClassificationPipeline,
|
| 191 |
+
"tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (),
|
| 192 |
+
"pt": (AutoModelForTokenClassification,) if is_torch_available() else (),
|
| 193 |
+
"default": {
|
| 194 |
+
"model": {
|
| 195 |
+
"pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
|
| 196 |
+
"tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
|
| 197 |
+
},
|
| 198 |
+
},
|
| 199 |
+
"type": "text",
|
| 200 |
+
},
|
| 201 |
+
"question-answering": {
|
| 202 |
+
"impl": QuestionAnsweringPipeline,
|
| 203 |
+
"tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (),
|
| 204 |
+
"pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (),
|
| 205 |
+
"default": {
|
| 206 |
+
"model": {
|
| 207 |
+
"pt": ("distilbert-base-cased-distilled-squad", "626af31"),
|
| 208 |
+
"tf": ("distilbert-base-cased-distilled-squad", "626af31"),
|
| 209 |
+
},
|
| 210 |
+
},
|
| 211 |
+
"type": "text",
|
| 212 |
+
},
|
| 213 |
+
"table-question-answering": {
|
| 214 |
+
"impl": TableQuestionAnsweringPipeline,
|
| 215 |
+
"pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (),
|
| 216 |
+
"tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (),
|
| 217 |
+
"default": {
|
| 218 |
+
"model": {
|
| 219 |
+
"pt": ("google/tapas-base-finetuned-wtq", "69ceee2"),
|
| 220 |
+
"tf": ("google/tapas-base-finetuned-wtq", "69ceee2"),
|
| 221 |
+
},
|
| 222 |
+
},
|
| 223 |
+
"type": "text",
|
| 224 |
+
},
|
| 225 |
+
"visual-question-answering": {
|
| 226 |
+
"impl": VisualQuestionAnsweringPipeline,
|
| 227 |
+
"pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (),
|
| 228 |
+
"tf": (),
|
| 229 |
+
"default": {
|
| 230 |
+
"model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")},
|
| 231 |
+
},
|
| 232 |
+
"type": "multimodal",
|
| 233 |
+
},
|
| 234 |
+
"document-question-answering": {
|
| 235 |
+
"impl": DocumentQuestionAnsweringPipeline,
|
| 236 |
+
"pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
|
| 237 |
+
"tf": (),
|
| 238 |
+
"default": {
|
| 239 |
+
"model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")},
|
| 240 |
+
},
|
| 241 |
+
"type": "multimodal",
|
| 242 |
+
},
|
| 243 |
+
"fill-mask": {
|
| 244 |
+
"impl": FillMaskPipeline,
|
| 245 |
+
"tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (),
|
| 246 |
+
"pt": (AutoModelForMaskedLM,) if is_torch_available() else (),
|
| 247 |
+
"default": {"model": {"pt": ("distilroberta-base", "ec58a5b"), "tf": ("distilroberta-base", "ec58a5b")}},
|
| 248 |
+
"type": "text",
|
| 249 |
+
},
|
| 250 |
+
"summarization": {
|
| 251 |
+
"impl": SummarizationPipeline,
|
| 252 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
| 253 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
| 254 |
+
"default": {"model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("t5-small", "d769bba")}},
|
| 255 |
+
"type": "text",
|
| 256 |
+
},
|
| 257 |
+
# This task is a special case as it's parametrized by SRC, TGT languages.
|
| 258 |
+
"translation": {
|
| 259 |
+
"impl": TranslationPipeline,
|
| 260 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
| 261 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
| 262 |
+
"default": {
|
| 263 |
+
("en", "fr"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
|
| 264 |
+
("en", "de"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
|
| 265 |
+
("en", "ro"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
|
| 266 |
+
},
|
| 267 |
+
"type": "text",
|
| 268 |
+
},
|
| 269 |
+
"text2text-generation": {
|
| 270 |
+
"impl": Text2TextGenerationPipeline,
|
| 271 |
+
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
|
| 272 |
+
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
|
| 273 |
+
"default": {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
|
| 274 |
+
"type": "text",
|
| 275 |
+
},
|
| 276 |
+
"text-generation": {
|
| 277 |
+
"impl": TextGenerationPipeline,
|
| 278 |
+
"tf": (TFAutoModelForCausalLM,) if is_tf_available() else (),
|
| 279 |
+
"pt": (AutoModelForCausalLM,) if is_torch_available() else (),
|
| 280 |
+
"default": {"model": {"pt": ("gpt2", "6c0e608"), "tf": ("gpt2", "6c0e608")}},
|
| 281 |
+
"type": "text",
|
| 282 |
+
},
|
| 283 |
+
"zero-shot-classification": {
|
| 284 |
+
"impl": ZeroShotClassificationPipeline,
|
| 285 |
+
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
|
| 286 |
+
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
|
| 287 |
+
"default": {
|
| 288 |
+
"model": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")},
|
| 289 |
+
"config": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")},
|
| 290 |
+
},
|
| 291 |
+
"type": "text",
|
| 292 |
+
},
|
| 293 |
+
"zero-shot-image-classification": {
|
| 294 |
+
"impl": ZeroShotImageClassificationPipeline,
|
| 295 |
+
"tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (),
|
| 296 |
+
"pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (),
|
| 297 |
+
"default": {
|
| 298 |
+
"model": {
|
| 299 |
+
"pt": ("openai/clip-vit-base-patch32", "f4881ba"),
|
| 300 |
+
"tf": ("openai/clip-vit-base-patch32", "f4881ba"),
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
"type": "multimodal",
|
| 304 |
+
},
|
| 305 |
+
"zero-shot-audio-classification": {
|
| 306 |
+
"impl": ZeroShotAudioClassificationPipeline,
|
| 307 |
+
"tf": (),
|
| 308 |
+
"pt": (AutoModel,) if is_torch_available() else (),
|
| 309 |
+
"default": {
|
| 310 |
+
"model": {
|
| 311 |
+
"pt": ("laion/clap-htsat-fused", "973b6e5"),
|
| 312 |
+
}
|
| 313 |
+
},
|
| 314 |
+
"type": "multimodal",
|
| 315 |
+
},
|
| 316 |
+
"conversational": {
|
| 317 |
+
"impl": ConversationalPipeline,
|
| 318 |
+
"tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (),
|
| 319 |
+
"pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (),
|
| 320 |
+
"default": {
|
| 321 |
+
"model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")}
|
| 322 |
+
},
|
| 323 |
+
"type": "text",
|
| 324 |
+
},
|
| 325 |
+
"image-classification": {
|
| 326 |
+
"impl": ImageClassificationPipeline,
|
| 327 |
+
"tf": (TFAutoModelForImageClassification,) if is_tf_available() else (),
|
| 328 |
+
"pt": (AutoModelForImageClassification,) if is_torch_available() else (),
|
| 329 |
+
"default": {
|
| 330 |
+
"model": {
|
| 331 |
+
"pt": ("google/vit-base-patch16-224", "5dca96d"),
|
| 332 |
+
"tf": ("google/vit-base-patch16-224", "5dca96d"),
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
"type": "image",
|
| 336 |
+
},
|
| 337 |
+
"image-segmentation": {
|
| 338 |
+
"impl": ImageSegmentationPipeline,
|
| 339 |
+
"tf": (),
|
| 340 |
+
"pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
|
| 341 |
+
"default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}},
|
| 342 |
+
"type": "multimodal",
|
| 343 |
+
},
|
| 344 |
+
"image-to-text": {
|
| 345 |
+
"impl": ImageToTextPipeline,
|
| 346 |
+
"tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
|
| 347 |
+
"pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
|
| 348 |
+
"default": {
|
| 349 |
+
"model": {
|
| 350 |
+
"pt": ("ydshieh/vit-gpt2-coco-en", "65636df"),
|
| 351 |
+
"tf": ("ydshieh/vit-gpt2-coco-en", "65636df"),
|
| 352 |
+
}
|
| 353 |
+
},
|
| 354 |
+
"type": "multimodal",
|
| 355 |
+
},
|
| 356 |
+
"object-detection": {
|
| 357 |
+
"impl": ObjectDetectionPipeline,
|
| 358 |
+
"tf": (),
|
| 359 |
+
"pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
|
| 360 |
+
"default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}},
|
| 361 |
+
"type": "multimodal",
|
| 362 |
+
},
|
| 363 |
+
"zero-shot-object-detection": {
|
| 364 |
+
"impl": ZeroShotObjectDetectionPipeline,
|
| 365 |
+
"tf": (),
|
| 366 |
+
"pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
|
| 367 |
+
"default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}},
|
| 368 |
+
"type": "multimodal",
|
| 369 |
+
},
|
| 370 |
+
"depth-estimation": {
|
| 371 |
+
"impl": DepthEstimationPipeline,
|
| 372 |
+
"tf": (),
|
| 373 |
+
"pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
|
| 374 |
+
"default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}},
|
| 375 |
+
"type": "image",
|
| 376 |
+
},
|
| 377 |
+
"video-classification": {
|
| 378 |
+
"impl": VideoClassificationPipeline,
|
| 379 |
+
"tf": (),
|
| 380 |
+
"pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
|
| 381 |
+
"default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}},
|
| 382 |
+
"type": "video",
|
| 383 |
+
},
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
NO_FEATURE_EXTRACTOR_TASKS = set()
|
| 387 |
+
NO_IMAGE_PROCESSOR_TASKS = set()
|
| 388 |
+
NO_TOKENIZER_TASKS = set()
|
| 389 |
+
# Those model configs are special, they are generic over their task, meaning
|
| 390 |
+
# any tokenizer/feature_extractor might be use for a given model so we cannot
|
| 391 |
+
# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to
|
| 392 |
+
# see if the model defines such objects or not.
|
| 393 |
+
MULTI_MODEL_CONFIGS = {"SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"}
|
| 394 |
+
for task, values in SUPPORTED_TASKS.items():
|
| 395 |
+
if values["type"] == "text":
|
| 396 |
+
NO_FEATURE_EXTRACTOR_TASKS.add(task)
|
| 397 |
+
NO_IMAGE_PROCESSOR_TASKS.add(task)
|
| 398 |
+
elif values["type"] in {"image", "video"}:
|
| 399 |
+
NO_TOKENIZER_TASKS.add(task)
|
| 400 |
+
elif values["type"] in {"audio"}:
|
| 401 |
+
NO_TOKENIZER_TASKS.add(task)
|
| 402 |
+
NO_IMAGE_PROCESSOR_TASKS.add(task)
|
| 403 |
+
elif values["type"] != "multimodal":
|
| 404 |
+
raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")
|
| 405 |
+
|
| 406 |
+
PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
def get_supported_tasks() -> List[str]:
|
| 410 |
+
"""
|
| 411 |
+
Returns a list of supported task strings.
|
| 412 |
+
"""
|
| 413 |
+
return PIPELINE_REGISTRY.get_supported_tasks()
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def get_task(model: str, use_auth_token: Optional[str] = None) -> str:
|
| 417 |
+
if is_offline_mode():
|
| 418 |
+
raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode")
|
| 419 |
+
try:
|
| 420 |
+
info = model_info(model, token=use_auth_token)
|
| 421 |
+
except Exception as e:
|
| 422 |
+
raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}")
|
| 423 |
+
if not info.pipeline_tag:
|
| 424 |
+
raise RuntimeError(
|
| 425 |
+
f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically"
|
| 426 |
+
)
|
| 427 |
+
if getattr(info, "library_name", "transformers") != "transformers":
|
| 428 |
+
raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers")
|
| 429 |
+
task = info.pipeline_tag
|
| 430 |
+
return task
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def check_task(task: str) -> Tuple[str, Dict, Any]:
|
| 434 |
+
"""
|
| 435 |
+
Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and
|
| 436 |
+
default models if they exist.
|
| 437 |
+
|
| 438 |
+
Args:
|
| 439 |
+
task (`str`):
|
| 440 |
+
The task defining which pipeline will be returned. Currently accepted tasks are:
|
| 441 |
+
|
| 442 |
+
- `"audio-classification"`
|
| 443 |
+
- `"automatic-speech-recognition"`
|
| 444 |
+
- `"conversational"`
|
| 445 |
+
- `"depth-estimation"`
|
| 446 |
+
- `"document-question-answering"`
|
| 447 |
+
- `"feature-extraction"`
|
| 448 |
+
- `"fill-mask"`
|
| 449 |
+
- `"image-classification"`
|
| 450 |
+
- `"image-segmentation"`
|
| 451 |
+
- `"image-to-text"`
|
| 452 |
+
- `"object-detection"`
|
| 453 |
+
- `"question-answering"`
|
| 454 |
+
- `"summarization"`
|
| 455 |
+
- `"table-question-answering"`
|
| 456 |
+
- `"text2text-generation"`
|
| 457 |
+
- `"text-classification"` (alias `"sentiment-analysis"` available)
|
| 458 |
+
- `"text-generation"`
|
| 459 |
+
- `"token-classification"` (alias `"ner"` available)
|
| 460 |
+
- `"translation"`
|
| 461 |
+
- `"translation_xx_to_yy"`
|
| 462 |
+
- `"video-classification"`
|
| 463 |
+
- `"visual-question-answering"`
|
| 464 |
+
- `"zero-shot-classification"`
|
| 465 |
+
- `"zero-shot-image-classification"`
|
| 466 |
+
- `"zero-shot-object-detection"`
|
| 467 |
+
|
| 468 |
+
Returns:
|
| 469 |
+
(normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
|
| 470 |
+
(removed alias and options). The actual dictionary required to initialize the pipeline and some extra task
|
| 471 |
+
options for parametrized tasks like "translation_XX_to_YY"
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
"""
|
| 475 |
+
return PIPELINE_REGISTRY.check_task(task)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def clean_custom_task(task_info):
|
| 479 |
+
import transformers
|
| 480 |
+
|
| 481 |
+
if "impl" not in task_info:
|
| 482 |
+
raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
|
| 483 |
+
pt_class_names = task_info.get("pt", ())
|
| 484 |
+
if isinstance(pt_class_names, str):
|
| 485 |
+
pt_class_names = [pt_class_names]
|
| 486 |
+
task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
|
| 487 |
+
tf_class_names = task_info.get("tf", ())
|
| 488 |
+
if isinstance(tf_class_names, str):
|
| 489 |
+
tf_class_names = [tf_class_names]
|
| 490 |
+
task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
|
| 491 |
+
return task_info, None
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def pipeline(
    task: str = None,
    model: Optional = None,
    config: Optional[Union[str, PretrainedConfig]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]] = None,
    feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
    image_processor: Optional[Union[str, BaseImageProcessor]] = None,
    framework: Optional[str] = None,
    revision: Optional[str] = None,
    use_fast: bool = True,
    use_auth_token: Optional[Union[str, bool]] = None,
    device: Optional[Union[int, str, "torch.device"]] = None,
    device_map=None,
    torch_dtype=None,
    trust_remote_code: Optional[bool] = None,
    model_kwargs: Dict[str, Any] = None,
    pipeline_class: Optional[Any] = None,
    **kwargs,
) -> Pipeline:
    """
    Utility factory method to build a [`Pipeline`].

    Pipelines are made of:

        - A [tokenizer](tokenizer) in charge of mapping raw textual input to token.
        - A [model](model) to make predictions from the inputs.
        - Some (optional) post processing for enhancing model's output.

    Args:
        task (`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - `"audio-classification"`: will return a [`AudioClassificationPipeline`].
            - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
            - `"conversational"`: will return a [`ConversationalPipeline`].
            - `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
            - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
            - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
            - `"fill-mask"`: will return a [`FillMaskPipeline`].
            - `"image-classification"`: will return a [`ImageClassificationPipeline`].
            - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
            - `"image-to-text"`: will return a [`ImageToTextPipeline`].
            - `"object-detection"`: will return a [`ObjectDetectionPipeline`].
            - `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
            - `"summarization"`: will return a [`SummarizationPipeline`].
            - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
            - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
            - `"text-classification"` (alias `"sentiment-analysis"` available): will return a
              [`TextClassificationPipeline`].
            - `"text-generation"`: will return a [`TextGenerationPipeline`].
            - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
            - `"translation"`: will return a [`TranslationPipeline`].
            - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
            - `"video-classification"`: will return a [`VideoClassificationPipeline`].
            - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
            - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
            - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
            - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
            - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].

        model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
            The model that will be used by the pipeline to make predictions. This can be a model identifier or an
            actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
            [`TFPreTrainedModel`] (for TensorFlow).

            If not provided, the default for the `task` will be loaded.
        config (`str` or [`PretrainedConfig`], *optional*):
            The configuration that will be used by the pipeline to instantiate the model. This can be a model
            identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].

            If not provided, the default configuration file for the requested model will be used. That means that if
            `model` is given, its default configuration will be used. However, if `model` is not supplied, this
            `task`'s default model's config is used instead.
        tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
            The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].

            If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
            is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
            However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
            will be loaded.
        feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
            The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
            identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].

            Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
            models. Multi-modal models will also require a tokenizer to be passed.

            If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
            `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
            is a string). However, if `config` is also not given or not a string, then the default feature extractor
            for the given `task` will be loaded.
        framework (`str`, *optional*):
            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
            installed.

            If no framework is specified, will default to the one currently installed. If no framework is specified and
            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
            provided.
        revision (`str`, *optional*, defaults to `"main"`):
            When passing a task name or a string model identifier: The specific model version to use. It can be a
            branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
            artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
        use_fast (`bool`, *optional*, defaults to `True`):
            Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
        use_auth_token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `huggingface-cli login` (stored in `~/.huggingface`).
        device (`int` or `str` or `torch.device`):
            Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
            pipeline will be allocated.
        device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set
            `device_map="auto"` to compute the most optimized `device_map` automatically (see
            [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
            for more information).

            <Tip warning={true}>

            Do not use `device_map` AND `device` at the same time as they will conflict

            </Tip>

        torch_dtype (`str` or `torch.dtype`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
            (`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
            tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
            and in which you have read the code, as it will execute code present on the Hub on your local machine.
        model_kwargs:
            Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
            **model_kwargs)` function.
        kwargs:
            Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
            corresponding pipeline class for possible values).

    Returns:
        [`Pipeline`]: A suitable pipeline for the task.

    Examples:

    ```python
    >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer

    >>> # Sentiment analysis pipeline
    >>> analyzer = pipeline("sentiment-analysis")

    >>> # Question answering pipeline, specifying the checkpoint identifier
    >>> oracle = pipeline(
    ...     "question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased"
    ... )

    >>> # Named entity recognition pipeline, passing in a specific model and tokenizer
    >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
    >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer)
    ```"""
    if model_kwargs is None:
        model_kwargs = {}
    # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs,
    # this is to keep BC).
    use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token)
    # Hub options forwarded to every `from_pretrained` call below. `_commit_hash` starts unset and is filled in
    # as soon as a config/model has been resolved, so later downloads pin the same snapshot.
    hub_kwargs = {
        "revision": revision,
        "use_auth_token": use_auth_token,
        "trust_remote_code": trust_remote_code,
        "_commit_hash": None,
    }

    # At least one of `task`/`model` is required to know what to build.
    if task is None and model is None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline without either a task or a model "
            "being specified. "
            "Please provide a task class or a model"
        )

    if model is None and tokenizer is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer"
            " may not be compatible with the default model. Please provide a PreTrainedModel class or a"
            " path/identifier to a pretrained model when providing tokenizer."
        )
    if model is None and feature_extractor is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided"
            " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class"
            " or a path/identifier to a pretrained model when providing feature_extractor."
        )
    # Accept `pathlib.Path` model locations by normalizing to `str`.
    if isinstance(model, Path):
        model = str(model)

    # Config is the primordial information item.
    # Instantiate config if needed
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs["_commit_hash"] = config._commit_hash
    elif config is None and isinstance(model, str):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs["_commit_hash"] = config._commit_hash

    # Custom pipelines declared by the model's own config (`config.custom_pipelines`). When no task was given,
    # a single declared custom task is auto-selected (unless remote code was explicitly disallowed).
    custom_tasks = {}
    if config is not None and len(getattr(config, "custom_pipelines", {})) > 0:
        custom_tasks = config.custom_pipelines
        if task is None and trust_remote_code is not False:
            if len(custom_tasks) == 1:
                task = list(custom_tasks.keys())[0]
            else:
                raise RuntimeError(
                    "We can't infer the task automatically for this model as there are multiple tasks available. Pick "
                    f"one in {', '.join(custom_tasks.keys())}"
                )

    # No task but a model id: ask the Hub what task this model serves.
    if task is None and model is not None:
        if not isinstance(model, str):
            raise RuntimeError(
                "Inferring the task automatically requires to check the hub with a model_id defined as a `str`."
                f"{model} is not a valid model_id."
            )
        task = get_task(model, use_auth_token)

    # Retrieve the task
    if task in custom_tasks:
        normalized_task = task
        targeted_task, task_options = clean_custom_task(custom_tasks[task])
        if pipeline_class is None:
            # Custom pipeline classes live in the model repo; refuse to execute them without explicit opt-in.
            if not trust_remote_code:
                raise ValueError(
                    "Loading this pipeline requires you to execute the code in the pipeline file in that"
                    " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
                    " set the option `trust_remote_code=True` to remove this error."
                )
            class_ref = targeted_task["impl"]
            # `impl` is "<module_file>.<ClassName>" inside the model repo.
            module_file, class_name = class_ref.split(".")
            pipeline_class = get_class_from_dynamic_module(
                model, module_file + ".py", class_name, revision=revision, use_auth_token=use_auth_token
            )
    else:
        normalized_task, targeted_task, task_options = check_task(task)
        if pipeline_class is None:
            pipeline_class = targeted_task["impl"]

    # Use default model/config/tokenizer for the task if no model is provided
    if model is None:
        # At that point framework might still be undetermined
        model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options)
        revision = revision if revision is not None else default_revision
        logger.warning(
            f"No model was supplied, defaulted to {model} and revision"
            f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n"
            "Using a pipeline without specifying a model name and revision in production is not recommended."
        )
        if config is None and isinstance(model, str):
            config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            hub_kwargs["_commit_hash"] = config._commit_hash

    # `device_map`/`torch_dtype` are shortcuts for the corresponding `model_kwargs` entries; passing both the
    # shortcut and the explicit kwarg is ambiguous and rejected.
    if device_map is not None:
        if "device_map" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those'
                " arguments might conflict, use only one.)"
            )
        if device is not None:
            logger.warning(
                "Both `device` and `device_map` are specified. `device` will override `device_map`. You"
                " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`."
            )
        model_kwargs["device_map"] = device_map
    if torch_dtype is not None:
        if "torch_dtype" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those'
                " arguments might conflict, use only one.)"
            )
        model_kwargs["torch_dtype"] = torch_dtype

    # Keep the string identifier (if any) around: it is reused to infer tokenizer/processor names below.
    model_name = model if isinstance(model, str) else None

    # Infer the framework from the model
    # Forced if framework already defined, inferred if it's None
    # Will load the correct model if possible
    model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
    framework, model = infer_framework_load_model(
        model,
        model_classes=model_classes,
        config=config,
        framework=framework,
        task=task,
        **hub_kwargs,
        **model_kwargs,
    )

    model_config = model.config
    hub_kwargs["_commit_hash"] = model.config._commit_hash

    # Decide which preprocessing components to load, based on whether the resolved config type is registered
    # in the corresponding Auto mapping (or the caller supplied/declared one explicitly).
    load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
    load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
    load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None

    # If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while
    # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some
    # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`.
    # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue.
    # This block is only temporarily to make CI green.
    if load_image_processor and load_feature_extractor:
        load_feature_extractor = False

    if (
        tokenizer is None
        and not load_tokenizer
        and normalized_task not in NO_TOKENIZER_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_tokenizer = True
    if (
        image_processor is None
        and not load_image_processor
        and normalized_task not in NO_IMAGE_PROCESSOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
        and normalized_task != "automatic-speech-recognition"
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_image_processor = True
    if (
        feature_extractor is None
        and not load_feature_extractor
        and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
    ):
        # This is a special category of models, that are fusions of multiple models
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_feature_extractor = True

    if task in NO_TOKENIZER_TASKS:
        # These will never require a tokenizer.
        # the model on the other hand might have a tokenizer, but
        # the files could be missing from the hub, instead of failing
        # on such repos, we just force to not load it.
        load_tokenizer = False

    if task in NO_FEATURE_EXTRACTOR_TASKS:
        load_feature_extractor = False
    if task in NO_IMAGE_PROCESSOR_TASKS:
        load_image_processor = False

    if load_tokenizer:
        # Try to infer tokenizer from model or config name (if provided as str)
        if tokenizer is None:
            if isinstance(model_name, str):
                tokenizer = model_name
            elif isinstance(config, str):
                tokenizer = config
            else:
                # Impossible to guess what is the right tokenizer here
                raise Exception(
                    "Impossible to guess which tokenizer to use. "
                    "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
                )

        # Instantiate tokenizer if needed
        if isinstance(tokenizer, (str, tuple)):
            if isinstance(tokenizer, tuple):
                # For tuple we have (tokenizer name, {kwargs})
                use_fast = tokenizer[1].pop("use_fast", use_fast)
                tokenizer_identifier = tokenizer[0]
                tokenizer_kwargs = tokenizer[1]
            else:
                tokenizer_identifier = tokenizer
                tokenizer_kwargs = model_kwargs

            tokenizer = AutoTokenizer.from_pretrained(
                tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
            )

    if load_image_processor:
        # Try to infer image processor from model or config name (if provided as str)
        if image_processor is None:
            if isinstance(model_name, str):
                image_processor = model_name
            elif isinstance(config, str):
                image_processor = config
            # Backward compatibility, as `feature_extractor` used to be the name
            # for `ImageProcessor`.
            elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor):
                image_processor = feature_extractor
            else:
                # Impossible to guess what is the right image_processor here
                raise Exception(
                    "Impossible to guess which image processor to use. "
                    "Please provide a PreTrainedImageProcessor class or a path/identifier "
                    "to a pretrained image processor."
                )

        # Instantiate image_processor if needed
        if isinstance(image_processor, (str, tuple)):
            image_processor = AutoImageProcessor.from_pretrained(
                image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

    if load_feature_extractor:
        # Try to infer feature extractor from model or config name (if provided as str)
        if feature_extractor is None:
            if isinstance(model_name, str):
                feature_extractor = model_name
            elif isinstance(config, str):
                feature_extractor = config
            else:
                # Impossible to guess what is the right feature_extractor here
                raise Exception(
                    "Impossible to guess which feature extractor to use. "
                    "Please provide a PreTrainedFeatureExtractor class or a path/identifier "
                    "to a pretrained feature extractor."
                )

        # Instantiate feature_extractor if needed
        if isinstance(feature_extractor, (str, tuple)):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

            # Extractors whose processor class ends in "WithLM" carry a CTC language-model decoder; try to
            # load it (requires the optional `kenlm` + `pyctcdecode` packages) and pass it to the pipeline.
            # NOTE: this only runs when the extractor was just loaded from a string/tuple identifier above.
            if (
                feature_extractor._processor_class
                and feature_extractor._processor_class.endswith("WithLM")
                and isinstance(model_name, str)
            ):
                try:
                    import kenlm  # to trigger `ImportError` if not installed
                    from pyctcdecode import BeamSearchDecoderCTC

                    if os.path.isdir(model_name) or os.path.isfile(model_name):
                        decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
                    else:
                        language_model_glob = os.path.join(
                            BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*"
                        )
                        alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
                        allow_patterns = [language_model_glob, alphabet_filename]
                        decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)

                    kwargs["decoder"] = decoder
                except ImportError as e:
                    # Best-effort: missing optional deps degrade to raw CTC instead of failing the pipeline.
                    logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}")
                    if not is_kenlm_available():
                        logger.warning("Try to install `kenlm`: `pip install kenlm")

                    if not is_pyctcdecode_available():
                        logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode")

    # Generic "translation" is refined to the model's concrete "translation_XX_to_YY" task-specific entry.
    if task == "translation" and model.config.task_specific_params:
        for key in model.config.task_specific_params:
            if key.startswith("translation"):
                task = key
                warnings.warn(
                    f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
                    UserWarning,
                )
                break

    # Only forward the components that were actually resolved, so pipeline classes that do not accept them
    # are not handed unexpected keyword arguments.
    if tokenizer is not None:
        kwargs["tokenizer"] = tokenizer

    if feature_extractor is not None:
        kwargs["feature_extractor"] = feature_extractor

    if torch_dtype is not None:
        kwargs["torch_dtype"] = torch_dtype

    if image_processor is not None:
        kwargs["image_processor"] = image_processor

    if device is not None:
        kwargs["device"] = device

    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (29.5 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_classification.cpython-310.pyc
ADDED
|
Binary file (6.22 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/audio_utils.cpython-310.pyc
ADDED
|
Binary file (6.25 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/automatic_speech_recognition.cpython-310.pyc
ADDED
|
Binary file (20.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (41.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/conversational.cpython-310.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/document_question_answering.cpython-310.pyc
ADDED
|
Binary file (16.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/feature_extraction.cpython-310.pyc
ADDED
|
Binary file (4.89 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/fill_mask.cpython-310.pyc
ADDED
|
Binary file (8.67 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_classification.cpython-310.pyc
ADDED
|
Binary file (5.54 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_segmentation.cpython-310.pyc
ADDED
|
Binary file (7.49 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/image_to_text.cpython-310.pyc
ADDED
|
Binary file (4.55 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/object_detection.cpython-310.pyc
ADDED
|
Binary file (7.52 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/pt_utils.cpython-310.pyc
ADDED
|
Binary file (9.31 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/question_answering.cpython-310.pyc
ADDED
|
Binary file (20.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/table_question_answering.cpython-310.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text2text_generation.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_classification.cpython-310.pyc
ADDED
|
Binary file (8.64 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/text_generation.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/token_classification.cpython-310.pyc
ADDED
|
Binary file (19.5 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/video_classification.cpython-310.pyc
ADDED
|
Binary file (5.44 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_classification.cpython-310.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_image_classification.cpython-310.pyc
ADDED
|
Binary file (6.45 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/__pycache__/zero_shot_object_detection.cpython-310.pyc
ADDED
|
Binary file (8.92 kB). View file
|
|
|
valley/lib/python3.10/site-packages/transformers/pipelines/fill_mask.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
|
| 6 |
+
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
if is_tf_available():
|
| 10 |
+
import tensorflow as tf
|
| 11 |
+
|
| 12 |
+
from ..tf_utils import stable_softmax
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
if is_torch_available():
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
logger = logging.get_logger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling
    examples](../task_summary#masked-language-modeling) for more information.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> fill_masker = pipeline(model="bert-base-uncased")
    >>> fill_masker("This is a simple [MASK].")
    [{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"fill-mask"`.

    The models that this pipeline can use are models that have been trained with a masked language modeling objective,
    which includes the bi-directional models in the library. See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=fill-mask).

    <Tip>

    This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple
    masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect
    joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)).

    </Tip>"""
|
| 65 |
+
|
| 66 |
+
def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
|
| 67 |
+
if self.framework == "tf":
|
| 68 |
+
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
|
| 69 |
+
elif self.framework == "pt":
|
| 70 |
+
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
|
| 71 |
+
else:
|
| 72 |
+
raise ValueError("Unsupported framework")
|
| 73 |
+
return masked_index
|
| 74 |
+
|
| 75 |
+
def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
|
| 76 |
+
masked_index = self.get_masked_index(input_ids)
|
| 77 |
+
numel = np.prod(masked_index.shape)
|
| 78 |
+
if numel < 1:
|
| 79 |
+
raise PipelineException(
|
| 80 |
+
"fill-mask",
|
| 81 |
+
self.model.base_model_prefix,
|
| 82 |
+
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
|
| 86 |
+
if isinstance(model_inputs, list):
|
| 87 |
+
for model_input in model_inputs:
|
| 88 |
+
self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
|
| 89 |
+
else:
|
| 90 |
+
for input_ids in model_inputs["input_ids"]:
|
| 91 |
+
self._ensure_exactly_one_mask_token(input_ids)
|
| 92 |
+
|
| 93 |
+
def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
|
| 94 |
+
if return_tensors is None:
|
| 95 |
+
return_tensors = self.framework
|
| 96 |
+
model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
|
| 97 |
+
self.ensure_exactly_one_mask_token(model_inputs)
|
| 98 |
+
return model_inputs
|
| 99 |
+
|
| 100 |
+
def _forward(self, model_inputs):
|
| 101 |
+
model_outputs = self.model(**model_inputs)
|
| 102 |
+
model_outputs["input_ids"] = model_inputs["input_ids"]
|
| 103 |
+
return model_outputs
|
| 104 |
+
|
| 105 |
+
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """
        Turn raw logits into the top-k token predictions for every mask position.

        Args:
            model_outputs: Model output dict, augmented with the original `input_ids` by `_forward`.
            top_k (`int`, defaults to 5): Number of predictions to return per mask position.
            target_ids (`np.ndarray`, *optional*): Vocabulary ids restricting the candidate tokens.

        Returns:
            A list of prediction dicts for a single mask, or a list of such lists for several masks.
        """
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            # Column 0 of tf.where's result holds the sequence positions of the masks.
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            # Logits restricted to the mask positions only.
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                # NOTE(review): the squeeze(0)/expand_dims(0) pair looks like it assumes a single
                # mask position when targets are used -- confirm against the multi-mask + targets case.
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        # One row of top-k values per mask position; a single row means single-mask input.
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    # `p` indexes into the restricted target set; map it back to a vocabulary id.
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
|
| 159 |
+
|
| 160 |
+
def get_target_ids(self, targets, top_k=None):
|
| 161 |
+
if isinstance(targets, str):
|
| 162 |
+
targets = [targets]
|
| 163 |
+
try:
|
| 164 |
+
vocab = self.tokenizer.get_vocab()
|
| 165 |
+
except Exception:
|
| 166 |
+
vocab = {}
|
| 167 |
+
target_ids = []
|
| 168 |
+
for target in targets:
|
| 169 |
+
id_ = vocab.get(target, None)
|
| 170 |
+
if id_ is None:
|
| 171 |
+
input_ids = self.tokenizer(
|
| 172 |
+
target,
|
| 173 |
+
add_special_tokens=False,
|
| 174 |
+
return_attention_mask=False,
|
| 175 |
+
return_token_type_ids=False,
|
| 176 |
+
max_length=1,
|
| 177 |
+
truncation=True,
|
| 178 |
+
)["input_ids"]
|
| 179 |
+
if len(input_ids) == 0:
|
| 180 |
+
logger.warning(
|
| 181 |
+
f"The specified target token `{target}` does not exist in the model vocabulary. "
|
| 182 |
+
"We cannot replace it with anything meaningful, ignoring it"
|
| 183 |
+
)
|
| 184 |
+
continue
|
| 185 |
+
id_ = input_ids[0]
|
| 186 |
+
# XXX: If users encounter this pass
|
| 187 |
+
# it becomes pretty slow, so let's make sure
|
| 188 |
+
# The warning enables them to fix the input to
|
| 189 |
+
# get faster performance.
|
| 190 |
+
logger.warning(
|
| 191 |
+
f"The specified target token `{target}` does not exist in the model vocabulary. "
|
| 192 |
+
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
|
| 193 |
+
)
|
| 194 |
+
target_ids.append(id_)
|
| 195 |
+
target_ids = list(set(target_ids))
|
| 196 |
+
if len(target_ids) == 0:
|
| 197 |
+
raise ValueError("At least one target must be provided when passed.")
|
| 198 |
+
target_ids = np.array(target_ids)
|
| 199 |
+
return target_ids
|
| 200 |
+
|
| 201 |
+
def _sanitize_parameters(self, top_k=None, targets=None):
|
| 202 |
+
postprocess_params = {}
|
| 203 |
+
|
| 204 |
+
if targets is not None:
|
| 205 |
+
target_ids = self.get_target_ids(targets, top_k)
|
| 206 |
+
postprocess_params["target_ids"] = target_ids
|
| 207 |
+
|
| 208 |
+
if top_k is not None:
|
| 209 |
+
postprocess_params["top_k"] = top_k
|
| 210 |
+
|
| 211 |
+
if self.tokenizer.mask_token_id is None:
|
| 212 |
+
raise PipelineException(
|
| 213 |
+
"fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
|
| 214 |
+
)
|
| 215 |
+
return {}, {}, postprocess_params
|
| 216 |
+
|
| 217 |
+
def __call__(self, inputs, *args, **kwargs):
|
| 218 |
+
"""
|
| 219 |
+
Fill the masked token in the text(s) given as inputs.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
args (`str` or `List[str]`):
|
| 223 |
+
One or several texts (or one list of prompts) with masked tokens.
|
| 224 |
+
targets (`str` or `List[str]`, *optional*):
|
| 225 |
+
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
|
| 226 |
+
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
|
| 227 |
+
resulting token will be used (with a warning, and that might be slower).
|
| 228 |
+
top_k (`int`, *optional*):
|
| 229 |
+
When passed, overrides the number of predictions to return.
|
| 230 |
+
|
| 231 |
+
Return:
|
| 232 |
+
A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys:
|
| 233 |
+
|
| 234 |
+
- **sequence** (`str`) -- The corresponding input with the mask token prediction.
|
| 235 |
+
- **score** (`float`) -- The corresponding probability.
|
| 236 |
+
- **token** (`int`) -- The predicted token id (to replace the masked one).
|
| 237 |
+
- **token_str** (`str`) -- The predicted token (to replace the masked one).
|
| 238 |
+
"""
|
| 239 |
+
outputs = super().__call__(inputs, **kwargs)
|
| 240 |
+
if isinstance(inputs, list) and len(inputs) == 1:
|
| 241 |
+
return outputs[0]
|
| 242 |
+
return outputs
|
valley/lib/python3.10/site-packages/transformers/pipelines/object_detection.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Union
|
| 2 |
+
|
| 3 |
+
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
|
| 4 |
+
from .base import PIPELINE_INIT_ARGS, Pipeline
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
if is_vision_available():
|
| 8 |
+
from ..image_utils import load_image
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
if is_torch_available():
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
|
| 15 |
+
|
| 16 |
+
logger = logging.get_logger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
Prediction = Dict[str, Any]
|
| 20 |
+
Predictions = List[Prediction]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects
    and their classes.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> detector = pipeline(model="facebook/detr-resnet-50")
    >>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}]

    >>> # x, y are expressed relative to the top left hand corner.
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"object-detection"`.

    See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection).
    """
|
| 48 |
+
|
| 49 |
+
def __init__(self, *args, **kwargs):
|
| 50 |
+
super().__init__(*args, **kwargs)
|
| 51 |
+
|
| 52 |
+
if self.framework == "tf":
|
| 53 |
+
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
|
| 54 |
+
|
| 55 |
+
requires_backends(self, "vision")
|
| 56 |
+
self.check_model_type(
|
| 57 |
+
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
def _sanitize_parameters(self, **kwargs):
|
| 61 |
+
postprocess_kwargs = {}
|
| 62 |
+
if "threshold" in kwargs:
|
| 63 |
+
postprocess_kwargs["threshold"] = kwargs["threshold"]
|
| 64 |
+
return {}, {}, postprocess_kwargs
|
| 65 |
+
|
| 66 |
+
def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
|
| 67 |
+
"""
|
| 68 |
+
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
|
| 72 |
+
The pipeline handles three types of images:
|
| 73 |
+
|
| 74 |
+
- A string containing an HTTP(S) link pointing to an image
|
| 75 |
+
- A string containing a local path to an image
|
| 76 |
+
- An image loaded in PIL directly
|
| 77 |
+
|
| 78 |
+
The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
|
| 79 |
+
same format: all as HTTP(S) links, all as local paths, or all as PIL images.
|
| 80 |
+
threshold (`float`, *optional*, defaults to 0.9):
|
| 81 |
+
The probability necessary to make a prediction.
|
| 82 |
+
|
| 83 |
+
Return:
|
| 84 |
+
A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single
|
| 85 |
+
image, will return a list of dictionaries, if the input is a list of several images, will return a list of
|
| 86 |
+
list of dictionaries corresponding to each image.
|
| 87 |
+
|
| 88 |
+
The dictionaries contain the following keys:
|
| 89 |
+
|
| 90 |
+
- **label** (`str`) -- The class label identified by the model.
|
| 91 |
+
- **score** (`float`) -- The score attributed by the model for that label.
|
| 92 |
+
- **box** (`List[Dict[str, int]]`) -- The bounding box of detected object in image's original size.
|
| 93 |
+
"""
|
| 94 |
+
|
| 95 |
+
return super().__call__(*args, **kwargs)
|
| 96 |
+
|
| 97 |
+
def preprocess(self, image):
|
| 98 |
+
image = load_image(image)
|
| 99 |
+
target_size = torch.IntTensor([[image.height, image.width]])
|
| 100 |
+
inputs = self.image_processor(images=[image], return_tensors="pt")
|
| 101 |
+
if self.tokenizer is not None:
|
| 102 |
+
inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
|
| 103 |
+
inputs["target_size"] = target_size
|
| 104 |
+
return inputs
|
| 105 |
+
|
| 106 |
+
def _forward(self, model_inputs):
|
| 107 |
+
target_size = model_inputs.pop("target_size")
|
| 108 |
+
outputs = self.model(**model_inputs)
|
| 109 |
+
model_outputs = outputs.__class__({"target_size": target_size, **outputs})
|
| 110 |
+
if self.tokenizer is not None:
|
| 111 |
+
model_outputs["bbox"] = model_inputs["bbox"]
|
| 112 |
+
return model_outputs
|
| 113 |
+
|
| 114 |
+
    def postprocess(self, model_outputs, threshold=0.9):
        """
        Convert raw model outputs into a list of {"score", "label", "box"} dictionaries.

        Args:
            model_outputs: Output of `_forward`, including `target_size` (and `bbox` for
                LayoutLM-style models).
            threshold (`float`, defaults to 0.9): Minimum score for a detection to be kept.
        """
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # Boxes come normalized to a 0-1000 grid; scale back to pixel coordinates.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            # Best class (and its probability) for every token.
            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            # Single-image batch: only one annotation dict to unpack.
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
|
| 158 |
+
|
| 159 |
+
def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
|
| 160 |
+
"""
|
| 161 |
+
Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... }
|
| 162 |
+
|
| 163 |
+
Args:
|
| 164 |
+
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
|
| 165 |
+
|
| 166 |
+
Returns:
|
| 167 |
+
bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
|
| 168 |
+
"""
|
| 169 |
+
if self.framework != "pt":
|
| 170 |
+
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
|
| 171 |
+
xmin, ymin, xmax, ymax = box.int().tolist()
|
| 172 |
+
bbox = {
|
| 173 |
+
"xmin": xmin,
|
| 174 |
+
"ymin": ymin,
|
| 175 |
+
"xmax": xmax,
|
| 176 |
+
"ymax": ymax,
|
| 177 |
+
}
|
| 178 |
+
return bbox
|
valley/lib/python3.10/site-packages/transformers/pipelines/question_answering.py
ADDED
|
@@ -0,0 +1,664 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import types
|
| 2 |
+
import warnings
|
| 3 |
+
from collections.abc import Iterable
|
| 4 |
+
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features
|
| 9 |
+
from ..modelcard import ModelCard
|
| 10 |
+
from ..tokenization_utils import PreTrainedTokenizer
|
| 11 |
+
from ..utils import (
|
| 12 |
+
PaddingStrategy,
|
| 13 |
+
add_end_docstrings,
|
| 14 |
+
is_tf_available,
|
| 15 |
+
is_tokenizers_available,
|
| 16 |
+
is_torch_available,
|
| 17 |
+
logging,
|
| 18 |
+
)
|
| 19 |
+
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.get_logger(__name__)
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from ..modeling_tf_utils import TFPreTrainedModel
|
| 26 |
+
from ..modeling_utils import PreTrainedModel
|
| 27 |
+
|
| 28 |
+
if is_tokenizers_available():
|
| 29 |
+
import tokenizers
|
| 30 |
+
|
| 31 |
+
if is_tf_available():
|
| 32 |
+
import tensorflow as tf
|
| 33 |
+
|
| 34 |
+
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
|
| 35 |
+
|
| 36 |
+
Dataset = None
|
| 37 |
+
|
| 38 |
+
if is_torch_available():
|
| 39 |
+
import torch
|
| 40 |
+
from torch.utils.data import Dataset
|
| 41 |
+
|
| 42 |
+
from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def decode_spans(
    start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray
) -> Tuple:
    """
    Score every (start, end) span from per-token start/end probabilities and return the best ones.

    Spans whose end precedes their start, or that are longer than `max_answer_len`, are discarded,
    as are spans touching tokens masked out by `undesired_tokens`. Up to `topk` spans are returned.

    Args:
        start (`np.ndarray`): Individual start probabilities for each token.
        end (`np.ndarray`): Individual end probabilities for each token.
        topk (`int`): Indicates how many possible answer span(s) to extract from the model output.
        max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
        undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer
    """
    # Add a batch axis when given 1D inputs.
    if start.ndim == 1:
        start = start[None]
    if end.ndim == 1:
        end = end[None]

    # Joint score of every candidate span: P(start=i) * P(end=j), as an outer product.
    span_scores = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))

    # Zero out spans with end < start or end - start >= max_answer_len.
    candidates = np.tril(np.triu(span_scores), max_answer_len - 1)

    # Pick the flat indices of the topk highest scores (partial sort when it pays off).
    flat_scores = candidates.flatten()
    if topk == 1:
        best = [np.argmax(flat_scores)]
    elif len(flat_scores) < topk:
        best = np.argsort(-flat_scores)
    else:
        partition = np.argpartition(-flat_scores, topk)[0:topk]
        best = partition[np.argsort(-flat_scores[partition])]

    # Drop the batch coordinate; keep (start, end) positions.
    starts, ends = np.unravel_index(best, candidates.shape)[1:]
    # Keep only spans whose endpoints lie inside the allowed (non-masked) tokens.
    allowed = undesired_tokens.nonzero()
    keep = np.isin(starts, allowed) & np.isin(ends, allowed)
    starts = starts[keep]
    ends = ends[keep]
    scores = candidates[0, starts, ends]

    return starts, ends, scores
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def select_starts_ends(
    start,
    end,
    p_mask,
    attention_mask,
    min_null_score=1000000,
    top_k=1,
    handle_impossible_answer=False,
    max_answer_len=15,
):
    """
    Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses
    `decode_spans()` to generate probabilities for each span to be the actual answer.

    Args:
        start (`np.ndarray`): Individual start logits for each token.
        end (`np.ndarray`): Individual end logits for each token.
        p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer
        attention_mask (`np.ndarray`): The attention mask generated by the tokenizer
        min_null_score(`float`): The minimum null (empty) answer score seen so far.
        top_k (`int`): Indicates how many possible answer span(s) to extract from the model output.
        handle_impossible_answer(`bool`): Whether to allow null (empty) answers
        max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
    """
    # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
    undesired_tokens = np.abs(np.array(p_mask) - 1)

    if attention_mask is not None:
        undesired_tokens = undesired_tokens & attention_mask

    # Generate mask
    undesired_tokens_mask = undesired_tokens == 0.0

    # Make sure non-context indexes in the tensor cannot contribute to the softmax
    start = np.where(undesired_tokens_mask, -10000.0, start)
    end = np.where(undesired_tokens_mask, -10000.0, end)

    # Normalize logits and spans to retrieve the answer
    # NOTE(review): the max is subtracted per row but the sum is taken over the whole array --
    # for the usual (1, seq_len) inputs this is a plain softmax; confirm intent for multi-row inputs.
    start = np.exp(start - start.max(axis=-1, keepdims=True))
    start = start / start.sum()

    end = np.exp(end - end.max(axis=-1, keepdims=True))
    end = end / end.sum()

    if handle_impossible_answer:
        # Position (0, 0) (the CLS token) scores the "no answer" option.
        min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())

    # Mask CLS
    start[0, 0] = end[0, 0] = 0.0

    starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)
    return starts, ends, scores, min_null_score
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class QuestionAnsweringArgumentHandler(ArgumentHandler):
    """
    QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
    internal [`SquadExample`].

    QuestionAnsweringArgumentHandler manages all the possible ways to create a [`SquadExample`] from the command-line
    supplied arguments.
    """
|
| 157 |
+
|
| 158 |
+
def normalize(self, item):
|
| 159 |
+
if isinstance(item, SquadExample):
|
| 160 |
+
return item
|
| 161 |
+
elif isinstance(item, dict):
|
| 162 |
+
for k in ["question", "context"]:
|
| 163 |
+
if k not in item:
|
| 164 |
+
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
|
| 165 |
+
elif item[k] is None:
|
| 166 |
+
raise ValueError(f"`{k}` cannot be None")
|
| 167 |
+
elif isinstance(item[k], str) and len(item[k]) == 0:
|
| 168 |
+
raise ValueError(f"`{k}` cannot be empty")
|
| 169 |
+
|
| 170 |
+
return QuestionAnsweringPipeline.create_sample(**item)
|
| 171 |
+
raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)")
|
| 172 |
+
|
| 173 |
+
def __call__(self, *args, **kwargs):
|
| 174 |
+
# Detect where the actual inputs are
|
| 175 |
+
if args is not None and len(args) > 0:
|
| 176 |
+
if len(args) == 1:
|
| 177 |
+
inputs = args[0]
|
| 178 |
+
elif len(args) == 2 and {type(el) for el in args} == {str}:
|
| 179 |
+
inputs = [{"question": args[0], "context": args[1]}]
|
| 180 |
+
else:
|
| 181 |
+
inputs = list(args)
|
| 182 |
+
# Generic compatibility with sklearn and Keras
|
| 183 |
+
# Batched data
|
| 184 |
+
elif "X" in kwargs:
|
| 185 |
+
inputs = kwargs["X"]
|
| 186 |
+
elif "data" in kwargs:
|
| 187 |
+
inputs = kwargs["data"]
|
| 188 |
+
elif "question" in kwargs and "context" in kwargs:
|
| 189 |
+
if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str):
|
| 190 |
+
inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]]
|
| 191 |
+
elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list):
|
| 192 |
+
if len(kwargs["question"]) != len(kwargs["context"]):
|
| 193 |
+
raise ValueError("Questions and contexts don't have the same lengths")
|
| 194 |
+
|
| 195 |
+
inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])]
|
| 196 |
+
elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str):
|
| 197 |
+
inputs = [{"question": kwargs["question"], "context": kwargs["context"]}]
|
| 198 |
+
else:
|
| 199 |
+
raise ValueError("Arguments can't be understood")
|
| 200 |
+
else:
|
| 201 |
+
raise ValueError(f"Unknown arguments {kwargs}")
|
| 202 |
+
|
| 203 |
+
# When user is sending a generator we need to trust it's a valid example
|
| 204 |
+
generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,)
|
| 205 |
+
if isinstance(inputs, generator_types):
|
| 206 |
+
return inputs
|
| 207 |
+
|
| 208 |
+
# Normalize inputs
|
| 209 |
+
if isinstance(inputs, dict):
|
| 210 |
+
inputs = [inputs]
|
| 211 |
+
elif isinstance(inputs, Iterable):
|
| 212 |
+
# Copy to avoid overriding arguments
|
| 213 |
+
inputs = list(inputs)
|
| 214 |
+
else:
|
| 215 |
+
raise ValueError(f"Invalid arguments {kwargs}")
|
| 216 |
+
|
| 217 |
+
for i, item in enumerate(inputs):
|
| 218 |
+
inputs[i] = self.normalize(item)
|
| 219 |
+
|
| 220 |
+
return inputs
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
|
| 224 |
+
class QuestionAnsweringPipeline(ChunkPipeline):
|
| 225 |
+
"""
|
| 226 |
+
Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering
|
| 227 |
+
examples](../task_summary#question-answering) for more information.
|
| 228 |
+
|
| 229 |
+
Example:
|
| 230 |
+
|
| 231 |
+
```python
|
| 232 |
+
>>> from transformers import pipeline
|
| 233 |
+
|
| 234 |
+
>>> oracle = pipeline(model="deepset/roberta-base-squad2")
|
| 235 |
+
>>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
|
| 236 |
+
{'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}
|
| 237 |
+
```
|
| 238 |
+
|
| 239 |
+
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
|
| 240 |
+
|
| 241 |
+
This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier:
|
| 242 |
+
`"question-answering"`.
|
| 243 |
+
|
| 244 |
+
The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
|
| 245 |
+
up-to-date list of available models on
|
| 246 |
+
[huggingface.co/models](https://huggingface.co/models?filter=question-answering).
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
default_input_names = "question,context"
|
| 250 |
+
handle_impossible_answer = False
|
| 251 |
+
|
| 252 |
+
def __init__(
|
| 253 |
+
self,
|
| 254 |
+
model: Union["PreTrainedModel", "TFPreTrainedModel"],
|
| 255 |
+
tokenizer: PreTrainedTokenizer,
|
| 256 |
+
modelcard: Optional[ModelCard] = None,
|
| 257 |
+
framework: Optional[str] = None,
|
| 258 |
+
task: str = "",
|
| 259 |
+
**kwargs,
|
| 260 |
+
):
|
| 261 |
+
super().__init__(
|
| 262 |
+
model=model,
|
| 263 |
+
tokenizer=tokenizer,
|
| 264 |
+
modelcard=modelcard,
|
| 265 |
+
framework=framework,
|
| 266 |
+
task=task,
|
| 267 |
+
**kwargs,
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
self._args_parser = QuestionAnsweringArgumentHandler()
|
| 271 |
+
self.check_model_type(
|
| 272 |
+
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
@staticmethod
|
| 276 |
+
def create_sample(
|
| 277 |
+
question: Union[str, List[str]], context: Union[str, List[str]]
|
| 278 |
+
) -> Union[SquadExample, List[SquadExample]]:
|
| 279 |
+
"""
|
| 280 |
+
QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulate all the
|
| 281 |
+
logic for converting question(s) and context(s) to [`SquadExample`].
|
| 282 |
+
|
| 283 |
+
We currently support extractive question answering.
|
| 284 |
+
|
| 285 |
+
Arguments:
|
| 286 |
+
question (`str` or `List[str]`): The question(s) asked.
|
| 287 |
+
context (`str` or `List[str]`): The context(s) in which we will look for the answer.
|
| 288 |
+
|
| 289 |
+
Returns:
|
| 290 |
+
One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
|
| 291 |
+
"""
|
| 292 |
+
if isinstance(question, list):
|
| 293 |
+
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
|
| 294 |
+
else:
|
| 295 |
+
return SquadExample(None, question, context, None, None, None)
|
| 296 |
+
|
| 297 |
+
def _sanitize_parameters(
|
| 298 |
+
self,
|
| 299 |
+
padding=None,
|
| 300 |
+
topk=None,
|
| 301 |
+
top_k=None,
|
| 302 |
+
doc_stride=None,
|
| 303 |
+
max_answer_len=None,
|
| 304 |
+
max_seq_len=None,
|
| 305 |
+
max_question_len=None,
|
| 306 |
+
handle_impossible_answer=None,
|
| 307 |
+
align_to_words=None,
|
| 308 |
+
**kwargs,
|
| 309 |
+
):
|
| 310 |
+
# Set defaults values
|
| 311 |
+
preprocess_params = {}
|
| 312 |
+
if padding is not None:
|
| 313 |
+
preprocess_params["padding"] = padding
|
| 314 |
+
if doc_stride is not None:
|
| 315 |
+
preprocess_params["doc_stride"] = doc_stride
|
| 316 |
+
if max_question_len is not None:
|
| 317 |
+
preprocess_params["max_question_len"] = max_question_len
|
| 318 |
+
if max_seq_len is not None:
|
| 319 |
+
preprocess_params["max_seq_len"] = max_seq_len
|
| 320 |
+
|
| 321 |
+
postprocess_params = {}
|
| 322 |
+
if topk is not None and top_k is None:
|
| 323 |
+
warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning)
|
| 324 |
+
top_k = topk
|
| 325 |
+
if top_k is not None:
|
| 326 |
+
if top_k < 1:
|
| 327 |
+
raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
|
| 328 |
+
postprocess_params["top_k"] = top_k
|
| 329 |
+
if max_answer_len is not None:
|
| 330 |
+
if max_answer_len < 1:
|
| 331 |
+
raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}")
|
| 332 |
+
if max_answer_len is not None:
|
| 333 |
+
postprocess_params["max_answer_len"] = max_answer_len
|
| 334 |
+
if handle_impossible_answer is not None:
|
| 335 |
+
postprocess_params["handle_impossible_answer"] = handle_impossible_answer
|
| 336 |
+
if align_to_words is not None:
|
| 337 |
+
postprocess_params["align_to_words"] = align_to_words
|
| 338 |
+
return preprocess_params, {}, postprocess_params
|
| 339 |
+
|
| 340 |
+
def __call__(self, *args, **kwargs):
|
| 341 |
+
"""
|
| 342 |
+
Answer the question(s) given as inputs by using the context(s).
|
| 343 |
+
|
| 344 |
+
Args:
|
| 345 |
+
args ([`SquadExample`] or a list of [`SquadExample`]):
|
| 346 |
+
One or several [`SquadExample`] containing the question and context.
|
| 347 |
+
X ([`SquadExample`] or a list of [`SquadExample`], *optional*):
|
| 348 |
+
One or several [`SquadExample`] containing the question and context (will be treated the same way as if
|
| 349 |
+
passed as the first positional argument).
|
| 350 |
+
data ([`SquadExample`] or a list of [`SquadExample`], *optional*):
|
| 351 |
+
One or several [`SquadExample`] containing the question and context (will be treated the same way as if
|
| 352 |
+
passed as the first positional argument).
|
| 353 |
+
question (`str` or `List[str]`):
|
| 354 |
+
One or several question(s) (must be used in conjunction with the `context` argument).
|
| 355 |
+
context (`str` or `List[str]`):
|
| 356 |
+
One or several context(s) associated with the question(s) (must be used in conjunction with the
|
| 357 |
+
`question` argument).
|
| 358 |
+
topk (`int`, *optional*, defaults to 1):
|
| 359 |
+
The number of answers to return (will be chosen by order of likelihood). Note that we return less than
|
| 360 |
+
topk answers if there are not enough options available within the context.
|
| 361 |
+
doc_stride (`int`, *optional*, defaults to 128):
|
| 362 |
+
If the context is too long to fit with the question for the model, it will be split in several chunks
|
| 363 |
+
with some overlap. This argument controls the size of that overlap.
|
| 364 |
+
max_answer_len (`int`, *optional*, defaults to 15):
|
| 365 |
+
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
|
| 366 |
+
max_seq_len (`int`, *optional*, defaults to 384):
|
| 367 |
+
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
|
| 368 |
+
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
|
| 369 |
+
max_question_len (`int`, *optional*, defaults to 64):
|
| 370 |
+
The maximum length of the question after tokenization. It will be truncated if needed.
|
| 371 |
+
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
|
| 372 |
+
Whether or not we accept impossible as an answer.
|
| 373 |
+
align_to_words (`bool`, *optional*, defaults to `True`):
|
| 374 |
+
Attempts to align the answer to real words. Improves quality on space separated langages. Might hurt on
|
| 375 |
+
non-space-separated languages (like Japanese or Chinese)
|
| 376 |
+
|
| 377 |
+
Return:
|
| 378 |
+
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
|
| 379 |
+
|
| 380 |
+
- **score** (`float`) -- The probability associated to the answer.
|
| 381 |
+
- **start** (`int`) -- The character start index of the answer (in the tokenized version of the input).
|
| 382 |
+
- **end** (`int`) -- The character end index of the answer (in the tokenized version of the input).
|
| 383 |
+
- **answer** (`str`) -- The answer to the question.
|
| 384 |
+
"""
|
| 385 |
+
|
| 386 |
+
# Convert inputs to features
|
| 387 |
+
|
| 388 |
+
examples = self._args_parser(*args, **kwargs)
|
| 389 |
+
if isinstance(examples, (list, tuple)) and len(examples) == 1:
|
| 390 |
+
return super().__call__(examples[0], **kwargs)
|
| 391 |
+
return super().__call__(examples, **kwargs)
|
| 392 |
+
|
| 393 |
+
def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None):
|
| 394 |
+
# XXX: This is specal, args_parser will not handle anything generator or dataset like
|
| 395 |
+
# For those we expect user to send a simple valid example either directly as a SquadExample or simple dict.
|
| 396 |
+
# So we still need a little sanitation here.
|
| 397 |
+
if isinstance(example, dict):
|
| 398 |
+
example = SquadExample(None, example["question"], example["context"], None, None, None)
|
| 399 |
+
|
| 400 |
+
if max_seq_len is None:
|
| 401 |
+
max_seq_len = min(self.tokenizer.model_max_length, 384)
|
| 402 |
+
if doc_stride is None:
|
| 403 |
+
doc_stride = min(max_seq_len // 2, 128)
|
| 404 |
+
|
| 405 |
+
if doc_stride > max_seq_len:
|
| 406 |
+
raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})")
|
| 407 |
+
|
| 408 |
+
if not self.tokenizer.is_fast:
|
| 409 |
+
features = squad_convert_examples_to_features(
|
| 410 |
+
examples=[example],
|
| 411 |
+
tokenizer=self.tokenizer,
|
| 412 |
+
max_seq_length=max_seq_len,
|
| 413 |
+
doc_stride=doc_stride,
|
| 414 |
+
max_query_length=max_question_len,
|
| 415 |
+
padding_strategy=PaddingStrategy.MAX_LENGTH,
|
| 416 |
+
is_training=False,
|
| 417 |
+
tqdm_enabled=False,
|
| 418 |
+
)
|
| 419 |
+
else:
|
| 420 |
+
# Define the side we want to truncate / pad and the text/pair sorting
|
| 421 |
+
question_first = self.tokenizer.padding_side == "right"
|
| 422 |
+
|
| 423 |
+
encoded_inputs = self.tokenizer(
|
| 424 |
+
text=example.question_text if question_first else example.context_text,
|
| 425 |
+
text_pair=example.context_text if question_first else example.question_text,
|
| 426 |
+
padding=padding,
|
| 427 |
+
truncation="only_second" if question_first else "only_first",
|
| 428 |
+
max_length=max_seq_len,
|
| 429 |
+
stride=doc_stride,
|
| 430 |
+
return_token_type_ids=True,
|
| 431 |
+
return_overflowing_tokens=True,
|
| 432 |
+
return_offsets_mapping=True,
|
| 433 |
+
return_special_tokens_mask=True,
|
| 434 |
+
)
|
| 435 |
+
# When the input is too long, it's converted in a batch of inputs with overflowing tokens
|
| 436 |
+
# and a stride of overlap between the inputs. If a batch of inputs is given, a special output
|
| 437 |
+
# "overflow_to_sample_mapping" indicate which member of the encoded batch belong to which original batch sample.
|
| 438 |
+
# Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping".
|
| 439 |
+
# "num_span" is the number of output samples generated from the overflowing tokens.
|
| 440 |
+
num_spans = len(encoded_inputs["input_ids"])
|
| 441 |
+
|
| 442 |
+
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
|
| 443 |
+
# We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
|
| 444 |
+
p_mask = [
|
| 445 |
+
[tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)]
|
| 446 |
+
for span_id in range(num_spans)
|
| 447 |
+
]
|
| 448 |
+
|
| 449 |
+
features = []
|
| 450 |
+
for span_idx in range(num_spans):
|
| 451 |
+
input_ids_span_idx = encoded_inputs["input_ids"][span_idx]
|
| 452 |
+
attention_mask_span_idx = (
|
| 453 |
+
encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None
|
| 454 |
+
)
|
| 455 |
+
token_type_ids_span_idx = (
|
| 456 |
+
encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None
|
| 457 |
+
)
|
| 458 |
+
# keep the cls_token unmasked (some models use it to indicate unanswerable questions)
|
| 459 |
+
if self.tokenizer.cls_token_id is not None:
|
| 460 |
+
cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
|
| 461 |
+
for cls_index in cls_indices:
|
| 462 |
+
p_mask[span_idx][cls_index] = 0
|
| 463 |
+
submask = p_mask[span_idx]
|
| 464 |
+
features.append(
|
| 465 |
+
SquadFeatures(
|
| 466 |
+
input_ids=input_ids_span_idx,
|
| 467 |
+
attention_mask=attention_mask_span_idx,
|
| 468 |
+
token_type_ids=token_type_ids_span_idx,
|
| 469 |
+
p_mask=submask,
|
| 470 |
+
encoding=encoded_inputs[span_idx],
|
| 471 |
+
# We don't use the rest of the values - and actually
|
| 472 |
+
# for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample
|
| 473 |
+
cls_index=None,
|
| 474 |
+
token_to_orig_map={},
|
| 475 |
+
example_index=0,
|
| 476 |
+
unique_id=0,
|
| 477 |
+
paragraph_len=0,
|
| 478 |
+
token_is_max_context=0,
|
| 479 |
+
tokens=[],
|
| 480 |
+
start_position=0,
|
| 481 |
+
end_position=0,
|
| 482 |
+
is_impossible=False,
|
| 483 |
+
qas_id=None,
|
| 484 |
+
)
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
for i, feature in enumerate(features):
|
| 488 |
+
fw_args = {}
|
| 489 |
+
others = {}
|
| 490 |
+
model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"]
|
| 491 |
+
|
| 492 |
+
for k, v in feature.__dict__.items():
|
| 493 |
+
if k in model_input_names:
|
| 494 |
+
if self.framework == "tf":
|
| 495 |
+
tensor = tf.constant(v)
|
| 496 |
+
if tensor.dtype == tf.int64:
|
| 497 |
+
tensor = tf.cast(tensor, tf.int32)
|
| 498 |
+
fw_args[k] = tf.expand_dims(tensor, 0)
|
| 499 |
+
elif self.framework == "pt":
|
| 500 |
+
tensor = torch.tensor(v)
|
| 501 |
+
if tensor.dtype == torch.int32:
|
| 502 |
+
tensor = tensor.long()
|
| 503 |
+
fw_args[k] = tensor.unsqueeze(0)
|
| 504 |
+
else:
|
| 505 |
+
others[k] = v
|
| 506 |
+
|
| 507 |
+
is_last = i == len(features) - 1
|
| 508 |
+
yield {"example": example, "is_last": is_last, **fw_args, **others}
|
| 509 |
+
|
| 510 |
+
def _forward(self, inputs):
|
| 511 |
+
example = inputs["example"]
|
| 512 |
+
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
|
| 513 |
+
output = self.model(**model_inputs)
|
| 514 |
+
if isinstance(output, dict):
|
| 515 |
+
return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs}
|
| 516 |
+
else:
|
| 517 |
+
start, end = output[:2]
|
| 518 |
+
return {"start": start, "end": end, "example": example, **inputs}
|
| 519 |
+
|
| 520 |
+
def postprocess(
|
| 521 |
+
self,
|
| 522 |
+
model_outputs,
|
| 523 |
+
top_k=1,
|
| 524 |
+
handle_impossible_answer=False,
|
| 525 |
+
max_answer_len=15,
|
| 526 |
+
align_to_words=True,
|
| 527 |
+
):
|
| 528 |
+
min_null_score = 1000000 # large and positive
|
| 529 |
+
answers = []
|
| 530 |
+
for output in model_outputs:
|
| 531 |
+
start_ = output["start"]
|
| 532 |
+
end_ = output["end"]
|
| 533 |
+
example = output["example"]
|
| 534 |
+
p_mask = output["p_mask"]
|
| 535 |
+
attention_mask = (
|
| 536 |
+
output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None
|
| 537 |
+
)
|
| 538 |
+
|
| 539 |
+
starts, ends, scores, min_null_score = select_starts_ends(
|
| 540 |
+
start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len
|
| 541 |
+
)
|
| 542 |
+
|
| 543 |
+
if not self.tokenizer.is_fast:
|
| 544 |
+
char_to_word = np.array(example.char_to_word_offset)
|
| 545 |
+
|
| 546 |
+
# Convert the answer (tokens) back to the original text
|
| 547 |
+
# Score: score from the model
|
| 548 |
+
# Start: Index of the first character of the answer in the context string
|
| 549 |
+
# End: Index of the character following the last character of the answer in the context string
|
| 550 |
+
# Answer: Plain text of the answer
|
| 551 |
+
for s, e, score in zip(starts, ends, scores):
|
| 552 |
+
token_to_orig_map = output["token_to_orig_map"]
|
| 553 |
+
answers.append(
|
| 554 |
+
{
|
| 555 |
+
"score": score.item(),
|
| 556 |
+
"start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(),
|
| 557 |
+
"end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(),
|
| 558 |
+
"answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]),
|
| 559 |
+
}
|
| 560 |
+
)
|
| 561 |
+
else:
|
| 562 |
+
# Convert the answer (tokens) back to the original text
|
| 563 |
+
# Score: score from the model
|
| 564 |
+
# Start: Index of the first character of the answer in the context string
|
| 565 |
+
# End: Index of the character following the last character of the answer in the context string
|
| 566 |
+
# Answer: Plain text of the answer
|
| 567 |
+
question_first = bool(self.tokenizer.padding_side == "right")
|
| 568 |
+
enc = output["encoding"]
|
| 569 |
+
|
| 570 |
+
# Encoding was *not* padded, input_ids *might*.
|
| 571 |
+
# It doesn't make a difference unless we're padding on
|
| 572 |
+
# the left hand side, since now we have different offsets
|
| 573 |
+
# everywhere.
|
| 574 |
+
if self.tokenizer.padding_side == "left":
|
| 575 |
+
offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum()
|
| 576 |
+
else:
|
| 577 |
+
offset = 0
|
| 578 |
+
|
| 579 |
+
# Sometimes the max probability token is in the middle of a word so:
|
| 580 |
+
# - we start by finding the right word containing the token with `token_to_word`
|
| 581 |
+
# - then we convert this word in a character span with `word_to_chars`
|
| 582 |
+
sequence_index = 1 if question_first else 0
|
| 583 |
+
for s, e, score in zip(starts, ends, scores):
|
| 584 |
+
s = s - offset
|
| 585 |
+
e = e - offset
|
| 586 |
+
|
| 587 |
+
start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words)
|
| 588 |
+
|
| 589 |
+
answers.append(
|
| 590 |
+
{
|
| 591 |
+
"score": score.item(),
|
| 592 |
+
"start": start_index,
|
| 593 |
+
"end": end_index,
|
| 594 |
+
"answer": example.context_text[start_index:end_index],
|
| 595 |
+
}
|
| 596 |
+
)
|
| 597 |
+
|
| 598 |
+
if handle_impossible_answer:
|
| 599 |
+
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
|
| 600 |
+
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k]
|
| 601 |
+
if len(answers) == 1:
|
| 602 |
+
return answers[0]
|
| 603 |
+
return answers
|
| 604 |
+
|
| 605 |
+
def get_indices(
|
| 606 |
+
self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool
|
| 607 |
+
) -> Tuple[int, int]:
|
| 608 |
+
if align_to_words:
|
| 609 |
+
try:
|
| 610 |
+
start_word = enc.token_to_word(s)
|
| 611 |
+
end_word = enc.token_to_word(e)
|
| 612 |
+
start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0]
|
| 613 |
+
end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1]
|
| 614 |
+
except Exception:
|
| 615 |
+
# Some tokenizers don't really handle words. Keep to offsets then.
|
| 616 |
+
start_index = enc.offsets[s][0]
|
| 617 |
+
end_index = enc.offsets[e][1]
|
| 618 |
+
else:
|
| 619 |
+
start_index = enc.offsets[s][0]
|
| 620 |
+
end_index = enc.offsets[e][1]
|
| 621 |
+
return start_index, end_index
|
| 622 |
+
|
| 623 |
+
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
|
| 624 |
+
"""
|
| 625 |
+
When decoding from token probabilities, this method maps token indexes to actual word in the initial context.
|
| 626 |
+
|
| 627 |
+
Args:
|
| 628 |
+
text (`str`): The actual context to extract the answer from.
|
| 629 |
+
start (`int`): The answer starting token index.
|
| 630 |
+
end (`int`): The answer end token index.
|
| 631 |
+
|
| 632 |
+
Returns:
|
| 633 |
+
Dictionary like `{'answer': str, 'start': int, 'end': int}`
|
| 634 |
+
"""
|
| 635 |
+
words = []
|
| 636 |
+
token_idx = char_start_idx = char_end_idx = chars_idx = 0
|
| 637 |
+
|
| 638 |
+
for i, word in enumerate(text.split(" ")):
|
| 639 |
+
token = self.tokenizer.tokenize(word)
|
| 640 |
+
|
| 641 |
+
# Append words if they are in the span
|
| 642 |
+
if start <= token_idx <= end:
|
| 643 |
+
if token_idx == start:
|
| 644 |
+
char_start_idx = chars_idx
|
| 645 |
+
|
| 646 |
+
if token_idx == end:
|
| 647 |
+
char_end_idx = chars_idx + len(word)
|
| 648 |
+
|
| 649 |
+
words += [word]
|
| 650 |
+
|
| 651 |
+
# Stop if we went over the end of the answer
|
| 652 |
+
if token_idx > end:
|
| 653 |
+
break
|
| 654 |
+
|
| 655 |
+
# Append the subtokenization length to the running index
|
| 656 |
+
token_idx += len(token)
|
| 657 |
+
chars_idx += len(word) + 1
|
| 658 |
+
|
| 659 |
+
# Join text with spaces
|
| 660 |
+
return {
|
| 661 |
+
"answer": " ".join(words),
|
| 662 |
+
"start": max(0, char_start_idx),
|
| 663 |
+
"end": min(len(text), char_end_idx),
|
| 664 |
+
}
|
valley/lib/python3.10/site-packages/transformers/pipelines/table_question_answering.py
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import types
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ..utils import (
|
| 7 |
+
add_end_docstrings,
|
| 8 |
+
is_tensorflow_probability_available,
|
| 9 |
+
is_tf_available,
|
| 10 |
+
is_torch_available,
|
| 11 |
+
requires_backends,
|
| 12 |
+
)
|
| 13 |
+
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Dataset, Pipeline, PipelineException
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
if is_torch_available():
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
from ..models.auto.modeling_auto import (
|
| 20 |
+
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 21 |
+
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
if is_tf_available() and is_tensorflow_probability_available():
|
| 25 |
+
import tensorflow as tf
|
| 26 |
+
import tensorflow_probability as tfp
|
| 27 |
+
|
| 28 |
+
from ..models.auto.modeling_tf_auto import (
|
| 29 |
+
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
|
| 30 |
+
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class TableQuestionAnsweringArgumentHandler(ArgumentHandler):
|
| 35 |
+
"""
|
| 36 |
+
Handles arguments for the TableQuestionAnsweringPipeline
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def __call__(self, table=None, query=None, **kwargs):
|
| 40 |
+
# Returns tqa_pipeline_inputs of shape:
|
| 41 |
+
# [
|
| 42 |
+
# {"table": pd.DataFrame, "query": List[str]},
|
| 43 |
+
# ...,
|
| 44 |
+
# {"table": pd.DataFrame, "query" : List[str]}
|
| 45 |
+
# ]
|
| 46 |
+
requires_backends(self, "pandas")
|
| 47 |
+
import pandas as pd
|
| 48 |
+
|
| 49 |
+
if table is None:
|
| 50 |
+
raise ValueError("Keyword argument `table` cannot be None.")
|
| 51 |
+
elif query is None:
|
| 52 |
+
if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None:
|
| 53 |
+
tqa_pipeline_inputs = [table]
|
| 54 |
+
elif isinstance(table, list) and len(table) > 0:
|
| 55 |
+
if not all(isinstance(d, dict) for d in table):
|
| 56 |
+
raise ValueError(
|
| 57 |
+
f"Keyword argument `table` should be a list of dict, but is {(type(d) for d in table)}"
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
if table[0].get("query") is not None and table[0].get("table") is not None:
|
| 61 |
+
tqa_pipeline_inputs = table
|
| 62 |
+
else:
|
| 63 |
+
raise ValueError(
|
| 64 |
+
"If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`"
|
| 65 |
+
f" and `query` key, but only dictionary has keys {table[0].keys()} `table` and `query` keys."
|
| 66 |
+
)
|
| 67 |
+
elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType):
|
| 68 |
+
return table
|
| 69 |
+
else:
|
| 70 |
+
raise ValueError(
|
| 71 |
+
"Invalid input. Keyword argument `table` should be either of type `dict` or `list`, but "
|
| 72 |
+
f"is {type(table)})"
|
| 73 |
+
)
|
| 74 |
+
else:
|
| 75 |
+
tqa_pipeline_inputs = [{"table": table, "query": query}]
|
| 76 |
+
|
| 77 |
+
for tqa_pipeline_input in tqa_pipeline_inputs:
|
| 78 |
+
if not isinstance(tqa_pipeline_input["table"], pd.DataFrame):
|
| 79 |
+
if tqa_pipeline_input["table"] is None:
|
| 80 |
+
raise ValueError("Table cannot be None.")
|
| 81 |
+
|
| 82 |
+
tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"])
|
| 83 |
+
|
| 84 |
+
return tqa_pipeline_inputs
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TableQuestionAnsweringPipeline(Pipeline):
    """
    Table Question Answering pipeline using a `ModelForTableQuestionAnswering`. This pipeline is only available in
    PyTorch.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq")
    >>> table = {
    ...     "Repository": ["Transformers", "Datasets", "Tokenizers"],
    ...     "Stars": ["36542", "4512", "3934"],
    ...     "Contributors": ["651", "77", "34"],
    ...     "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    ... }
    >>> oracle(query="How many stars does the transformers repository have?", table=table)
    {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'}
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This tabular question answering pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"table-question-answering"`.

    The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task.
    See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=table-question-answering).
    """

    default_input_names = "table,query"

    def __init__(self, args_parser=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Avoid a mutable default argument: the previous default built one shared
        # `TableQuestionAnsweringArgumentHandler` at class-definition time for all instances.
        # Passing `args_parser` explicitly keeps the exact previous behavior.
        self._args_parser = args_parser if args_parser is not None else TableQuestionAnsweringArgumentHandler()

        # NOTE(review): `.items()` here must support `+` — presumably the auto-mapping objects
        # return lists rather than dict views; confirmed nowhere in this file.
        self.check_model_type(
            dict(
                TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items()
                + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()
            )
            if self.framework == "tf"
            else dict(
                MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items() + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()
            )
        )

        # Aggregation is only enabled when the config exposes both the label map and its size.
        self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool(
            getattr(self.model.config, "num_aggregation_labels", None)
        )
        # TAPAS-style models are detected by the presence of `aggregation_labels` on the config.
        self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None

    def batch_inference(self, **inputs):
        # One forward pass over the whole batch; used when queries are independent.
        return self.model(**inputs)

    def sequential_inference(self, **inputs):
        """
        Inference used for models that need to process sequences in a sequential fashion, like the SQA models which
        handle conversational query related to a table.

        Each example is fed to the model one at a time; the cell-selection decisions predicted for example `i` are
        written into the `prev_labels` token-type slot (column 3 of `token_type_ids`) of example `i + 1`, so the
        model can condition on its previous answers.
        """
        if self.framework == "pt":
            all_logits = []
            all_aggregations = []
            prev_answers = None
            batch_size = inputs["input_ids"].shape[0]

            input_ids = inputs["input_ids"].to(self.device)
            attention_mask = inputs["attention_mask"].to(self.device)
            token_type_ids = inputs["token_type_ids"].to(self.device)
            token_type_ids_example = None

            for index in range(batch_size):
                # If sequences have already been processed, the token type IDs will be created according to the
                # previous answer.
                if prev_answers is not None:
                    prev_labels_example = token_type_ids_example[:, 3]  # shape (seq_len,)
                    model_labels = np.zeros_like(prev_labels_example.cpu().numpy())  # shape (seq_len,)

                    token_type_ids_example = token_type_ids[index]  # shape (seq_len, 7)
                    for i in range(model_labels.shape[0]):
                        segment_id = token_type_ids_example[:, 0].tolist()[i]
                        col_id = token_type_ids_example[:, 1].tolist()[i] - 1
                        row_id = token_type_ids_example[:, 2].tolist()[i] - 1

                        # Only real table cells (segment 1, valid row/col) inherit the previous answer.
                        if row_id >= 0 and col_id >= 0 and segment_id == 1:
                            model_labels[i] = int(prev_answers[(col_id, row_id)])

                    # Writes through the view into `token_type_ids[index]`.
                    token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device)

                input_ids_example = input_ids[index]
                attention_mask_example = attention_mask[index]  # shape (seq_len,)
                token_type_ids_example = token_type_ids[index]  # shape (seq_len, 7)
                outputs = self.model(
                    input_ids=input_ids_example.unsqueeze(0),
                    attention_mask=attention_mask_example.unsqueeze(0),
                    token_type_ids=token_type_ids_example.unsqueeze(0),
                )
                logits = outputs.logits

                if self.aggregate:
                    all_aggregations.append(outputs.logits_aggregation)

                all_logits.append(logits)

                # Per-token cell-selection probabilities, masked to real (non-padding) tokens.
                dist_per_token = torch.distributions.Bernoulli(logits=logits)
                probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to(
                    dist_per_token.probs.device
                )

                coords_to_probs = collections.defaultdict(list)
                for i, p in enumerate(probabilities.squeeze().tolist()):
                    segment_id = token_type_ids_example[:, 0].tolist()[i]
                    col = token_type_ids_example[:, 1].tolist()[i] - 1
                    row = token_type_ids_example[:, 2].tolist()[i] - 1
                    if col >= 0 and row >= 0 and segment_id == 1:
                        coords_to_probs[(col, row)].append(p)

                # A cell counts as "answered" if its mean token probability exceeds 0.5.
                prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs}

            logits_batch = torch.cat(tuple(all_logits), 0)

            return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0))
        else:
            all_logits = []
            all_aggregations = []
            prev_answers = None
            batch_size = inputs["input_ids"].shape[0]

            input_ids = inputs["input_ids"]
            attention_mask = inputs["attention_mask"]
            token_type_ids = inputs["token_type_ids"].numpy()
            token_type_ids_example = None

            for index in range(batch_size):
                # If sequences have already been processed, the token type IDs will be created according to the
                # previous answer.
                if prev_answers is not None:
                    prev_labels_example = token_type_ids_example[:, 3]  # shape (seq_len,)
                    model_labels = np.zeros_like(prev_labels_example, dtype=np.int32)  # shape (seq_len,)

                    token_type_ids_example = token_type_ids[index]  # shape (seq_len, 7)
                    for i in range(model_labels.shape[0]):
                        segment_id = token_type_ids_example[:, 0].tolist()[i]
                        col_id = token_type_ids_example[:, 1].tolist()[i] - 1
                        row_id = token_type_ids_example[:, 2].tolist()[i] - 1

                        if row_id >= 0 and col_id >= 0 and segment_id == 1:
                            model_labels[i] = int(prev_answers[(col_id, row_id)])

                    token_type_ids_example[:, 3] = model_labels

                input_ids_example = input_ids[index]
                attention_mask_example = attention_mask[index]  # shape (seq_len,)
                token_type_ids_example = token_type_ids[index]  # shape (seq_len, 7)
                outputs = self.model(
                    input_ids=np.expand_dims(input_ids_example, axis=0),
                    attention_mask=np.expand_dims(attention_mask_example, axis=0),
                    token_type_ids=np.expand_dims(token_type_ids_example, axis=0),
                )
                logits = outputs.logits

                if self.aggregate:
                    all_aggregations.append(outputs.logits_aggregation)

                all_logits.append(logits)

                dist_per_token = tfp.distributions.Bernoulli(logits=logits)
                probabilities = dist_per_token.probs_parameter() * tf.cast(attention_mask_example, tf.float32)

                coords_to_probs = collections.defaultdict(list)
                # (A redundant self-assignment of `token_type_ids_example` was removed here.)
                for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()):
                    segment_id = token_type_ids_example[:, 0].tolist()[i]
                    col = token_type_ids_example[:, 1].tolist()[i] - 1
                    row = token_type_ids_example[:, 2].tolist()[i] - 1
                    if col >= 0 and row >= 0 and segment_id == 1:
                        coords_to_probs[(col, row)].append(p)

                prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs}

            logits_batch = tf.concat(tuple(all_logits), 0)

            return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0))

    def __call__(self, *args, **kwargs):
        r"""
        Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below:

        - `pipeline(table, query)`
        - `pipeline(table, [query])`
        - `pipeline(table=table, query=query)`
        - `pipeline(table=table, query=[query])`
        - `pipeline({"table": table, "query": query})`
        - `pipeline({"table": table, "query": [query]})`
        - `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])`

        The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table:

        Example:

        ```python
        data = {
            "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
            "age": ["56", "45", "59"],
            "number of movies": ["87", "53", "69"],
            "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        }
        ```

        This dictionary can be passed in as such, or can be converted to a pandas DataFrame:

        Example:

        ```python
        import pandas as pd

        table = pd.DataFrame.from_dict(data)
        ```

        Args:
            table (`pd.DataFrame` or `Dict`):
                Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values.
                See above for an example of dictionary.
            query (`str` or `List[str]`):
                Query or list of queries that will be sent to the model alongside the table.
            sequential (`bool`, *optional*, defaults to `False`):
                Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
                inference to be done sequentially to extract relations within sequences, given their conversational
                nature.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).

            truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
                  or to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate row by row, removing rows from the table.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).


        Return:
            A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following
            keys:

            - **answer** (`str`) -- The answer of the query given the table. If there is an aggregator, the answer will
              be preceded by `AGGREGATOR >`.
            - **coordinates** (`List[Tuple[int, int]]`) -- Coordinates of the cells of the answers.
            - **cells** (`List[str]`) -- List of strings made up of the answer cell values.
            - **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator.
        """
        pipeline_inputs = self._args_parser(*args, **kwargs)

        results = super().__call__(pipeline_inputs, **kwargs)
        # A single (table, query) pair yields a single result rather than a one-element list.
        if len(results) == 1:
            return results[0]
        return results

    def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs):
        # Route user kwargs to the pipeline stages: tokenization options go to preprocess,
        # the `sequential` switch goes to _forward, postprocess takes none.
        preprocess_params = {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = {}
        if sequential is not None:
            forward_params["sequential"] = sequential
        return preprocess_params, forward_params, {}

    def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None):
        # TAPAS tokenizers support dropping table rows to fit the model length; other models do not truncate.
        if truncation is None:
            if self.type == "tapas":
                truncation = "drop_rows_to_fit"
            else:
                truncation = "do_not_truncate"

        table, query = pipeline_input["table"], pipeline_input["query"]
        if table.empty:
            raise ValueError("table is empty")
        if query is None or query == "":
            raise ValueError("query is empty")
        inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding)
        # The table is carried along so postprocess can look up the answer cell values.
        inputs["table"] = table
        return inputs

    def _forward(self, model_inputs, sequential=False):
        table = model_inputs.pop("table")

        if self.type == "tapas":
            if sequential:
                outputs = self.sequential_inference(**model_inputs)
            else:
                outputs = self.batch_inference(**model_inputs)
        else:
            # Non-TAPAS (seq2seq) models produce the answer by free-form generation.
            outputs = self.model.generate(**model_inputs)
        model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs}
        return model_outputs

    def postprocess(self, model_outputs):
        inputs = model_outputs["model_inputs"]
        table = model_outputs["table"]
        outputs = model_outputs["outputs"]
        if self.type == "tapas":
            if self.aggregate:
                logits, logits_agg = outputs[:2]
                predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg)
                answer_coordinates_batch, agg_predictions = predictions
                aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)}

                no_agg_label_index = self.model.config.no_aggregation_label_index
                aggregators_prefix = {
                    i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index
                }
            else:
                logits = outputs[0]
                predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits)
                answer_coordinates_batch = predictions[0]
                aggregators = {}
                aggregators_prefix = {}
            answers = []
            for index, coordinates in enumerate(answer_coordinates_batch):
                cells = [table.iat[coordinate] for coordinate in coordinates]
                aggregator = aggregators.get(index, "")
                aggregator_prefix = aggregators_prefix.get(index, "")
                answer = {
                    "answer": aggregator_prefix + ", ".join(cells),
                    "coordinates": coordinates,
                    # Reuse the already-computed cell values instead of indexing the table a second time.
                    "cells": cells,
                }
                if aggregator:
                    answer["aggregator"] = aggregator

                answers.append(answer)
            # FIX: the original checked `len(answer)` (the last per-row dict, which always has keys)
            # inside the loop, so the empty-result guard could never fire. Check the accumulated
            # list once, after the loop.
            if len(answers) == 0:
                raise PipelineException("Empty answer")
        else:
            answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)]

        return answers if len(answers) > 1 else answers[0]
|
valley/lib/python3.10/site-packages/transformers/pipelines/text2text_generation.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
from ..tokenization_utils import TruncationStrategy
|
| 5 |
+
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
|
| 6 |
+
from .base import PIPELINE_INIT_ARGS, Pipeline
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
if is_tf_available():
|
| 10 |
+
import tensorflow as tf
|
| 11 |
+
|
| 12 |
+
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
|
| 13 |
+
|
| 14 |
+
if is_torch_available():
|
| 15 |
+
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
|
| 16 |
+
|
| 17 |
+
logger = logging.get_logger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class ReturnType(enum.Enum):
    """Output mode for text2text pipelines: raw generated token-id tensors or decoded text."""

    TENSORS = 0
    TEXT = 1
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
    >>> generator(
    ...     "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
    ... )
    [{'generated_text': 'question: Who created the RuPERTa-base?'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)


    This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
    parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Usage:

    ```python
    text2text_generator = pipeline("text2text-generation")
    text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
    ```"""

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Route user kwargs to the preprocess / forward / postprocess stages of the pipeline."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        # NOTE: `forward_params` aliases `generate_kwargs` — the `stop_sequence` handling below
        # deliberately mutates `generate_kwargs` and relies on that aliasing to reach _forward.
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.

        The base implementation accepts everything; subclasses override this to emit warnings.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        # Prepend the model's task prefix (e.g. "summarize: " for T5) when the config defines one.
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            # FIX: the original message was ungrammatical ("have the wrong format. The should be").
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                Input text for the encoder.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
                The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
                (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
                max_length instead of throwing an error down the line.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """

        result = super().__call__(*args, **kwargs)
        # Flatten one level when the caller passed a flat list of strings and each input
        # produced exactly one generation.
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Regroup generations as (input_batch, num_return_sequences, seq_len).
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"summarization"`.

    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is
    currently, '*bart-large-cnn*', '*t5-small*', '*t5-base*', '*t5-large*', '*t5-3b*', '*t5-11b*'. See the up-to-date
    list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list
    of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Usage:

    ```python
    # use bart in pytorch
    summarizer = pipeline("summarization")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

    # use t5 in tf
    summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
    ```"""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (*str* or `List[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
            - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the summary.
        """
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with given input with regard to the model.

        Emits warnings only; always returns `True` so generation proceeds.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                # FIX: typo in the original message ("you input_length").
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. You might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
        # FIX: the original was annotated `-> bool` but fell off the end returning None,
        # breaking the contract established by the base class (which returns True).
        return True
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
    For a list of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)

    Usage:

    ```python
    en_fr_translator = pipeline("translation_en_to_fr")
    en_fr_translator("How old are you?")
    ```"""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn when the tokenized input is close to `max_length`; never blocks generation."""
        if input_length > 0.9 * max_length:
            # Print the actual 0.9 * max_length threshold being compared against, instead of
            # mislabeling the raw max_length value as "0.9 * max_length".
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {0.9 * max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """
        Tokenize the inputs. Uses the tokenizer's `_build_translation_inputs` hook when available
        (multilingual tokenizers that need `src_lang`/`tgt_lang`), otherwise falls back to the
        generic `_parse_and_tokenize` from the parent pipeline.
        """
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """
        Route `src_lang`/`tgt_lang` to preprocessing; when neither is given, try to infer them
        from a `"translation_xx_to_yy"`-style task name for backward compatibility.
        """
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                Texts to be translated.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            src_lang (`str`, *optional*):
                The language of the input. Might be required for multilingual models. Will not have any effect for
                single pair translation models
            tgt_lang (`str`, *optional*):
                The language of the desired output. Might be required for multilingual models. Will not have any effect
                for single pair translation models
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:

            - **translation_text** (`str`, present when `return_text=True`) -- The translation.
            - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
              token ids of the translation.
        """
        return super().__call__(*args, **kwargs)
|
valley/lib/python3.10/site-packages/transformers/pipelines/text_generation.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
|
| 5 |
+
from ..utils import add_end_docstrings, is_tf_available
|
| 6 |
+
from .base import PIPELINE_INIT_ARGS, Pipeline
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
if is_tf_available():
|
| 10 |
+
import tensorflow as tf
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ReturnType(enum.Enum):
    # Output format selector consumed by `TextGenerationPipeline.postprocess`.
    TENSORS = 0  # return raw generated token ids ("generated_token_ids")
    NEW_TEXT = 1  # return decoded text with the prompt portion stripped
    FULL_TEXT = 2  # return prompt + newly generated text (postprocess default)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="gpt2")
    >>> generator("I can't believe you did such a ", do_sample=False)
    [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]

    >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions.
    >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"text-generation"`.

    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models
    on [huggingface.co/models](https://huggingface.co/models?filter=text-generation).
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e

    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        """Validate the model is a causal LM and install a default `prefix` when the model needs one."""
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                # `_sanitize_parameters` adjusts max_length/min_length by the prefix token count.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """
        Split user kwargs into (preprocess_params, forward_params, postprocess_params).

        A non-empty `prefix` is tokenized here so its token count can be added to any
        `max_length`/`min_length` budget (not needed when `max_new_tokens` is used, which
        counts only newly generated tokens).
        """
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            prefix_length = prefix_inputs["input_ids"].shape[-1]

            if "max_new_tokens" in generate_kwargs:
                pass
            elif "max_length" in generate_kwargs:
                generate_kwargs["max_length"] += prefix_length
            else:
                generate_kwargs["max_length"] = self.model.config.max_length + prefix_length

            if "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        # `preprocess` also needs the generation kwargs (e.g. max_new_tokens/max_length) to
        # compute truncation for the "hole" strategy.
        preprocess_params.update(generate_kwargs)
        # NOTE: this is an alias, not a copy — the `eos_token_id` assignment below therefore
        # also lands in `forward_params`. Do not turn this into `dict(generate_kwargs)`.
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, *args, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """
        Complete the prompt(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                One or several prompts (or one list of prompts) to complete.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to return the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Only meaningful if
                *return_text* is set to True.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            prefix (`str`, *optional*):
                Prefix added to prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipelines does not handle long generation (ones that exceed in one form or the other
                the model maximum length). There is no perfect way to adress this (more info
                :https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around that problem depending on your use case.

                - `None` : default strategy where nothing in particular happens
                - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
                  truncate a lot of the prompt and not suitable when generation exceed the model capacity)

            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Return:
            A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize `prefix + prompt_text`; under the "hole" strategy, left-truncate the prompt
        so that prompt tokens + requested new tokens fit in `tokenizer.model_max_length`."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        # Carried through `_forward` so `postprocess` can rebuild the full text.
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                # Derive the new-token budget from max_length minus the current prompt length.
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                # Keep only the rightmost tokens of the prompt.
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run `model.generate` and reshape the output to (batch, num_return_sequences, seq_len)."""
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        # generate flattens batch and num_return_sequences into dim 0; split them back apart.
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode generated sequences according to `return_type` (see `ReturnType`).

        Operates on a single batch item: `model_outputs["generated_sequence"][0]` holds the
        `num_return_sequences` candidate sequences for that item.
        """
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    # Length in characters of the decoded prompt, used to slice it off `text`.
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    # Re-attach the original (untruncated) prompt text to the new text only.
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
|