| repo_id | file_path | content | __index_level_0__ |
|---|---|---|---|
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_modeling_tf_utils.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import inspect
import json
import os
import random
import tempfile
import unittest
import unittest.mock as mock
from huggingface_hub import HfFolder, Repository, delete_repo, snapshot_download
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import is_tf_available, is_torch_available
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import ( # noqa: F401
TOKEN,
USER,
CaptureLogger,
_tf_gpu_memory_limit,
is_pt_tf_cross_test,
is_staging_test,
require_safetensors,
require_tf,
require_torch,
slow,
)
from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, logging
logger = logging.get_logger(__name__)
if is_tf_available():
import h5py
import numpy as np
import tensorflow as tf
from transformers import (
BertConfig,
PreTrainedModel,
PushToHubCallback,
RagRetriever,
TFBertForMaskedLM,
TFBertForSequenceClassification,
TFBertModel,
TFPreTrainedModel,
TFRagModel,
)
from transformers.modeling_tf_utils import tf_shard_checkpoint, unpack_inputs
from transformers.tf_utils import stable_softmax
tf.config.experimental.enable_tensor_float_32_execution(False)
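# TensorFloat-32 is disabled so that reduced matmul precision on Ampere+ GPUs does not affect
# the exact / allclose weight comparisons performed in the tests below.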
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
if is_torch_available():
from transformers import BertModel
@require_tf
class TFModelUtilsTest(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# This checks that we did call the fake head request
mock_head.assert_called()
def test_load_from_one_file(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", f)
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
_ = TFBertModel.from_pretrained(tmp_file, config=config)
finally:
os.remove(tmp_file)
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
_ = TFBertModel.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", config=config
)
# tests whether the unpack_inputs function behaves as expected
def test_unpack_inputs(self):
class DummyModel:
def __init__(self):
config_kwargs = {"output_attentions": False, "output_hidden_states": False, "return_dict": False}
self.config = PretrainedConfig(**config_kwargs)
self.main_input_name = "input_ids"
@unpack_inputs
def call(
self,
input_ids=None,
past_key_values=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return input_ids, past_key_values, output_attentions, output_hidden_states, return_dict
@unpack_inputs
def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):
return pixel_values, output_attentions, output_hidden_states, return_dict
dummy_model = DummyModel()
input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int32)
past_key_values = tf.constant([4, 5, 6, 7], dtype=tf.int32)
pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int32)
# test case 1: Pass inputs as keyword arguments; Booleans are inherited from the config.
output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values)
tf.debugging.assert_equal(output[0], input_ids)
tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 2: Same as above, but with positional arguments.
output = dummy_model.call(input_ids, past_key_values)
tf.debugging.assert_equal(output[0], input_ids)
tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 3: We can also pack everything in the first input.
output = dummy_model.call(input_ids={"input_ids": input_ids, "past_key_values": past_key_values})
tf.debugging.assert_equal(output[0], input_ids)
tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertFalse(output[4])
# test case 4: Explicit boolean arguments should override the config.
output = dummy_model.call(
input_ids=input_ids, past_key_values=past_key_values, output_attentions=False, return_dict=True
)
tf.debugging.assert_equal(output[0], input_ids)
tf.debugging.assert_equal(output[1], past_key_values)
self.assertFalse(output[2])
self.assertFalse(output[3])
self.assertTrue(output[4])
# test case 5: Unexpected arguments should raise an exception.
with self.assertRaises(ValueError):
output = dummy_model.call(input_ids=input_ids, past_key_values=past_key_values, foo="bar")
# test case 6: the decorator is independent of `main_input_name` -- it treats the first argument of the
# decorated function as its main input.
output = dummy_model.foo(pixel_values=pixel_values)
tf.debugging.assert_equal(output[0], pixel_values)
self.assertFalse(output[1])
self.assertFalse(output[2])
self.assertFalse(output[3])
# Tests whether the stable softmax is stable on CPU, with and without XLA
def test_xla_stable_softmax(self):
large_penalty = -1e9
n_tokens = 10
batch_size = 8
def masked_softmax(x, boolean_mask):
numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty
masked_x = x + numerical_mask
return stable_softmax(masked_x)
xla_masked_softmax = tf.function(masked_softmax, jit_compile=True)
xla_stable_softmax = tf.function(stable_softmax, jit_compile=True)
x = tf.random.normal((batch_size, n_tokens))
# Same outcome regardless of the boolean mask here
masked_tokens = random.randint(0, n_tokens)
boolean_mask = tf.convert_to_tensor([[1] * (n_tokens - masked_tokens) + [0] * masked_tokens], dtype=tf.int32)
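# Shape (1, n_tokens): ones for the kept positions followed by zeros for the masked ones.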
# We can randomly mask a random numerical input OUTSIDE XLA
numerical_mask = (1.0 - tf.cast(boolean_mask, dtype=tf.float32)) * large_penalty
masked_x = x + numerical_mask
xla_out = xla_stable_softmax(masked_x)
out = stable_softmax(masked_x)
assert tf.experimental.numpy.allclose(xla_out, out)
# The stable softmax has the same output as the original softmax
unstable_out = tf.nn.softmax(masked_x)
assert tf.experimental.numpy.allclose(unstable_out, out)
# We can randomly mask a random numerical input INSIDE XLA
xla_out = xla_masked_softmax(x, boolean_mask)
out = masked_softmax(x, boolean_mask)
assert tf.experimental.numpy.allclose(xla_out, out)
def test_checkpoint_sharding_from_hub(self):
model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
for p1, p2 in zip(model.weights, ref_model.weights):
assert np.allclose(p1.numpy(), p2.numpy())
def test_sharded_checkpoint_with_prefix(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", load_weight_prefix="a/b")
sharded_model = TFBertModel.from_pretrained("ArthurZ/tiny-random-bert-sharded", load_weight_prefix="a/b")
for p1, p2 in zip(model.weights, sharded_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
self.assertTrue(p1.name.startswith("a/b/"))
self.assertTrue(p2.name.startswith("a/b/"))
def test_sharded_checkpoint_transfer(self):
# If this doesn't throw an error then the test passes
TFBertForSequenceClassification.from_pretrained("ArthurZ/tiny-random-bert-sharded")
@is_pt_tf_cross_test
def test_checkpoint_sharding_local_from_pt(self):
with tempfile.TemporaryDirectory() as tmp_dir:
_ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-bert-sharded")
model = TFBertModel.from_pretrained(tmp_dir, from_pt=True)
# the model above is the same as the model below, just a sharded pytorch version.
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
for p1, p2 in zip(model.weights, ref_model.weights):
assert np.allclose(p1.numpy(), p2.numpy())
@is_pt_tf_cross_test
def test_checkpoint_loading_with_prefix_from_pt(self):
model = TFBertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert", from_pt=True, load_weight_prefix="a/b"
)
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True)
for p1, p2 in zip(model.weights, ref_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
self.assertTrue(p1.name.startswith("a/b/"))
@is_pt_tf_cross_test
def test_checkpoint_sharding_hub_from_pt(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True)
# the model above is the same as the model below, just a sharded pytorch version.
ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
for p1, p2 in zip(model.weights, ref_model.weights):
assert np.allclose(p1.numpy(), p2.numpy())
def test_shard_checkpoint(self):
# This is the model we will use, total size 340,000 bytes.
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(200, use_bias=False), # size 80,000
tf.keras.layers.Dense(200, use_bias=False), # size 160,000
tf.keras.layers.Dense(100, use_bias=False), # size 80,000
tf.keras.layers.Dense(50, use_bias=False), # size 20,000
]
)
inputs = tf.zeros((1, 100), dtype=tf.float32)
model(inputs)
weights = model.weights
weights_dict = {w.name: w for w in weights}
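# Map variable names (e.g. "dense/kernel:0") to the corresponding variables for the shard assertions below.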
with self.subTest("No shard when max size is bigger than model size"):
shards, index = tf_shard_checkpoint(weights)
self.assertIsNone(index)
self.assertDictEqual(shards, {TF2_WEIGHTS_NAME: weights})
with self.subTest("Test sharding, no weights bigger than max size"):
shards, index = tf_shard_checkpoint(weights, max_shard_size="300kB")
# Split is first two layers then last two.
self.assertDictEqual(
index,
{
"metadata": {"total_size": 340000},
"weight_map": {
"dense/kernel:0": "tf_model-00001-of-00002.h5",
"dense_1/kernel:0": "tf_model-00001-of-00002.h5",
"dense_2/kernel:0": "tf_model-00002-of-00002.h5",
"dense_3/kernel:0": "tf_model-00002-of-00002.h5",
},
},
)
shard1 = [weights_dict["dense/kernel:0"], weights_dict["dense_1/kernel:0"]]
shard2 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]]
self.assertDictEqual(shards, {"tf_model-00001-of-00002.h5": shard1, "tf_model-00002-of-00002.h5": shard2})
with self.subTest("Test sharding with weights bigger than max size"):
shards, index = tf_shard_checkpoint(weights, max_shard_size="100kB")
# Split is first layer, second layer then last 2.
self.assertDictEqual(
index,
{
"metadata": {"total_size": 340000},
"weight_map": {
"dense/kernel:0": "tf_model-00001-of-00003.h5",
"dense_1/kernel:0": "tf_model-00002-of-00003.h5",
"dense_2/kernel:0": "tf_model-00003-of-00003.h5",
"dense_3/kernel:0": "tf_model-00003-of-00003.h5",
},
},
)
shard1 = [weights_dict["dense/kernel:0"]]
shard2 = [weights_dict["dense_1/kernel:0"]]
shard3 = [weights_dict["dense_2/kernel:0"], weights_dict["dense_3/kernel:0"]]
self.assertDictEqual(
shards,
{
"tf_model-00001-of-00003.h5": shard1,
"tf_model-00002-of-00003.h5": shard2,
"tf_model-00003-of-00003.h5": shard3,
},
)
@slow
def test_special_layer_name_sharding(self):
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
model = TFRagModel.from_pretrained("facebook/rag-token-nq", retriever=retriever)
with tempfile.TemporaryDirectory() as tmp_dir:
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size)
ref_model = TFRagModel.from_pretrained(tmp_dir, retriever=retriever)
for p1, p2 in zip(model.weights, ref_model.weights):
assert np.allclose(p1.numpy(), p2.numpy())
def test_checkpoint_sharding_local(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
# We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size)
# Get each shard file and its size
shard_to_size = {}
for shard in os.listdir(tmp_dir):
if shard.endswith(".h5"):
shard_file = os.path.join(tmp_dir, shard)
shard_to_size[shard_file] = os.path.getsize(shard_file)
index_file = os.path.join(tmp_dir, TF2_WEIGHTS_INDEX_NAME)
# Check there is an index but no regular weight file
self.assertTrue(os.path.isfile(index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME)))
# Check a file is bigger than max_size only when it has a single weight
for shard_file, size in shard_to_size.items():
if max_size.endswith("kiB"):
max_size_int = int(max_size[:-3]) * 2**10
else:
max_size_int = int(max_size[:-2]) * 10**3
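# Here "kB" is treated as 10**3 bytes and "kiB" as 2**10 bytes when computing the expected limit.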
# Note: serialization adds some overhead, so the file can end up slightly bigger than
# the size asked for (since we only count parameter sizes)
if size >= max_size_int + 50000:
with h5py.File(shard_file, "r") as state_file:
self.assertEqual(len(state_file), 1)
# Check the index and the shard files found match
with open(index_file, "r", encoding="utf-8") as f:
index = json.loads(f.read())
all_shards = set(index["weight_map"].values())
shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".h5")}
self.assertSetEqual(all_shards, shards_found)
# Finally, check the model can be reloaded
new_model = TFBertModel.from_pretrained(tmp_dir)
model.build()
new_model.build()
for p1, p2 in zip(model.weights, new_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@slow
def test_save_pretrained_signatures(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Short custom TF signature function.
# `input_signature` is specific to BERT.
@tf.function(
input_signature=[
[
tf.TensorSpec([None, None], tf.int32, name="input_ids"),
tf.TensorSpec([None, None], tf.int32, name="token_type_ids"),
tf.TensorSpec([None, None], tf.int32, name="attention_mask"),
]
]
)
def serving_fn(input):
return model(input)
# Using the default signature (signatures=None) results in a 'serving_default' signature being saved
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, saved_model=True, signatures=None)
model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1")
self.assertTrue("serving_default" in list(model_loaded.signatures.keys()))
# Providing a custom signature function
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, saved_model=True, signatures={"custom_signature": serving_fn})
model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1")
self.assertTrue("custom_signature" in list(model_loaded.signatures.keys()))
# Providing multiple custom signature functions
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
tmp_dir,
saved_model=True,
signatures={"custom_signature_1": serving_fn, "custom_signature_2": serving_fn},
)
model_loaded = tf.keras.models.load_model(f"{tmp_dir}/saved_model/1")
self.assertTrue("custom_signature_1" in list(model_loaded.signatures.keys()))
self.assertTrue("custom_signature_2" in list(model_loaded.signatures.keys()))
@require_safetensors
def test_safetensors_save_and_load(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
# No tf_model.h5 file, only a model.safetensors
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, TF2_WEIGHTS_NAME)))
new_model = TFBertModel.from_pretrained(tmp_dir)
# Check models are equal
for p1, p2 in zip(model.weights, new_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@is_pt_tf_cross_test
def test_safetensors_save_and_load_pt_to_tf(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
pt_model.save_pretrained(tmp_dir, safe_serialization=True)
# Check we have a model.safetensors file
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
new_model = TFBertModel.from_pretrained(tmp_dir)
# Check models are equal
for p1, p2 in zip(model.weights, new_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_load_from_hub(self):
tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Can load from the TF-formatted checkpoint
safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors-tf")
# Check models are equal
for p1, p2 in zip(safetensors_model.weights, tf_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
# Can load from the PyTorch-formatted checkpoint
safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors")
# Check models are equal
for p1, p2 in zip(safetensors_model.weights, tf_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_tf_from_tf(self):
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
new_model = TFBertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.weights, new_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
@is_pt_tf_cross_test
def test_safetensors_tf_from_torch(self):
hub_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
new_model = TFBertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(hub_model.weights, new_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_local(self):
with tempfile.TemporaryDirectory() as tmp_dir:
path = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded", cache_dir=tmp_dir)
# This should not raise even if there are two types of sharded weights
TFBertModel.from_pretrained(path)
@require_safetensors
def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_hub(self):
# This should not raise even if there are two types of sharded weights
# This should discard the safetensors weights in favor of the .h5 sharded weights
TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-safetensors-h5-sharded")
@require_safetensors
def test_safetensors_load_from_local(self):
"""
This test checks that we can load safetensors from a local checkpoint that only has safetensors weights
"""
with tempfile.TemporaryDirectory() as tmp:
location = snapshot_download("hf-internal-testing/tiny-bert-tf-only", cache_dir=tmp)
tf_model = TFBertModel.from_pretrained(location)
with tempfile.TemporaryDirectory() as tmp:
location = snapshot_download("hf-internal-testing/tiny-bert-tf-safetensors-only", cache_dir=tmp)
safetensors_model = TFBertModel.from_pretrained(location)
for p1, p2 in zip(tf_model.weights, safetensors_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_load_from_hub_from_safetensors_pt(self):
"""
This test checks that we can load safetensors from a checkpoint on the Hub that only has safetensors weights,
saved in the "pt" format.
"""
tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-h5")
# Can load from the PyTorch-formatted checkpoint
safetensors_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors")
for p1, p2 in zip(tf_model.weights, safetensors_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_load_from_local_from_safetensors_pt(self):
"""
This test checks that we can load safetensors from a local checkpoint that only has those
saved in the "pt" format.
"""
with tempfile.TemporaryDirectory() as tmp:
location = snapshot_download("hf-internal-testing/tiny-bert-h5", cache_dir=tmp)
tf_model = TFBertModel.from_pretrained(location)
# Can load from the PyTorch-formatted checkpoint
with tempfile.TemporaryDirectory() as tmp:
location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp)
safetensors_model = TFBertModel.from_pretrained(location)
for p1, p2 in zip(tf_model.weights, safetensors_model.weights):
self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
@require_safetensors
def test_safetensors_load_from_hub_h5_before_safetensors(self):
"""
This test checks that the h5 weights are downloaded in preference to the safetensors weights.
The safetensors file on that repo was saved in the "pt" format and therefore cannot be loaded without PyTorch.
"""
TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack")
@require_safetensors
def test_safetensors_load_from_local_h5_before_safetensors(self):
"""
This test checks that the h5 weights are downloaded in preference to the safetensors weights.
The safetensors file on that repo was saved in the "pt" format and therefore cannot be loaded without PyTorch.
"""
with tempfile.TemporaryDirectory() as tmp:
location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp)
TFBertModel.from_pretrained(location)
@require_tf
@is_staging_test
class TFModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-model-tf")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-model-tf-callback")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-model-tf-org")
except HTTPError:
pass
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = TFBertModel(config)
# Make sure model is properly initialized
model.build()
logging.set_verbosity_info()
logger = logging.get_logger("transformers.utils.hub")
with CaptureLogger(logger) as cl:
model.push_to_hub("test-model-tf", token=self._token)
logging.set_verbosity_warning()
# Check the model card was created and uploaded.
self.assertIn("Uploading the following files to __DUMMY_TRANSFORMERS_USER__/test-model-tf", cl.out)
new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf")
models_equal = True
for p1, p2 in zip(model.weights, new_model.weights):
if not tf.math.reduce_all(p1 == p2):
models_equal = False
break
self.assertTrue(models_equal)
# Reset repo
delete_repo(token=self._token, repo_id="test-model-tf")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, repo_id="test-model-tf", push_to_hub=True, token=self._token)
new_model = TFBertModel.from_pretrained(f"{USER}/test-model-tf")
models_equal = True
for p1, p2 in zip(model.weights, new_model.weights):
if not tf.math.reduce_all(p1 == p2):
models_equal = False
break
self.assertTrue(models_equal)
@is_pt_tf_cross_test
def test_push_to_hub_callback(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = TFBertForMaskedLM(config)
model.compile()
with tempfile.TemporaryDirectory() as tmp_dir:
push_to_hub_callback = PushToHubCallback(
output_dir=tmp_dir,
hub_model_id="test-model-tf-callback",
hub_token=self._token,
)
model.fit(model.dummy_inputs, model.dummy_inputs, epochs=1, callbacks=[push_to_hub_callback])
new_model = TFBertForMaskedLM.from_pretrained(f"{USER}/test-model-tf-callback")
models_equal = True
for p1, p2 in zip(model.weights, new_model.weights):
if not tf.math.reduce_all(p1 == p2):
models_equal = False
break
self.assertTrue(models_equal)
tf_push_to_hub_params = dict(inspect.signature(TFPreTrainedModel.push_to_hub).parameters)
tf_push_to_hub_params.pop("base_model_card_args")
pt_push_to_hub_params = dict(inspect.signature(PreTrainedModel.push_to_hub).parameters)
pt_push_to_hub_params.pop("deprecated_kwargs")
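# After removing each framework's catch-all argument, the TF and PT `push_to_hub` signatures should match exactly.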
self.assertDictEqual(tf_push_to_hub_params, pt_push_to_hub_params)
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = TFBertModel(config)
# Make sure model is properly initialized
model.build()
model.push_to_hub("valid_org/test-model-tf-org", token=self._token)
new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org")
models_equal = True
for p1, p2 in zip(model.weights, new_model.weights):
if not tf.math.reduce_all(p1 == p2):
models_equal = False
break
self.assertTrue(models_equal)
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-model-tf-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-tf-org")
new_model = TFBertModel.from_pretrained("valid_org/test-model-tf-org")
models_equal = True
for p1, p2 in zip(model.weights, new_model.weights):
if not tf.math.reduce_all(p1 == p2):
models_equal = False
break
self.assertTrue(models_equal)
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_modeling_utils.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import glob
import json
import os
import os.path
import sys
import tempfile
import unittest
import unittest.mock as mock
import uuid
from pathlib import Path
import requests
from huggingface_hub import HfApi, HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from pytest import mark
from requests.exceptions import HTTPError
from transformers import (
AutoConfig,
AutoModel,
PretrainedConfig,
is_torch_available,
logging,
)
from transformers.testing_utils import (
TOKEN,
USER,
CaptureLogger,
TestCasePlus,
is_staging_test,
require_accelerate,
require_flax,
require_safetensors,
require_tf,
require_torch,
require_torch_accelerator,
require_torch_multi_accelerator,
require_usr_bin_time,
slow,
torch_device,
)
from transformers.utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from transformers.utils.import_utils import (
is_flash_attn_2_available,
is_flax_available,
is_tf_available,
is_torch_sdpa_available,
is_torchdynamo_available,
)
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig, NoSuperInitConfig # noqa E402
if is_torch_available():
import torch
from safetensors.torch import save_file as safe_save_file
from test_module.custom_modeling import CustomModel, NoSuperInitModel
from torch import nn
from transformers import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoModelForCausalLM,
AutoTokenizer,
BertConfig,
BertModel,
CLIPTextModel,
PreTrainedModel,
T5Config,
T5ForConditionalGeneration,
)
from transformers.modeling_attn_mask_utils import (
AttentionMaskConverter,
_create_4d_causal_attention_mask,
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
)
from transformers.modeling_utils import shard_checkpoint
# Fake pretrained models for tests
class BaseModel(PreTrainedModel):
base_model_prefix = "base"
config_class = PretrainedConfig
def __init__(self, config):
super().__init__(config)
self.linear = nn.Linear(5, 5)
self.linear_2 = nn.Linear(5, 5)
def forward(self, x):
return self.linear_2(self.linear(x))
class BaseModelWithTiedWeights(PreTrainedModel):
config_class = PretrainedConfig
def __init__(self, config):
super().__init__(config)
self.linear = nn.Linear(5, 5)
self.linear_2 = nn.Linear(5, 5)
def forward(self, x):
return self.linear_2(self.linear(x))
def tie_weights(self):
self.linear_2.weight = self.linear.weight
class ModelWithHead(PreTrainedModel):
base_model_prefix = "base"
config_class = PretrainedConfig
def _init_weights(self, module):
pass
def __init__(self, config):
super().__init__(config)
self.base = BaseModel(config)
# linear is a common name between Base and Head on purpose.
self.linear = nn.Linear(5, 5)
self.linear2 = nn.Linear(5, 5)
def forward(self, x):
return self.linear2(self.linear(self.base(x)))
class ModelWithHeadAndTiedWeights(PreTrainedModel):
base_model_prefix = "base"
config_class = PretrainedConfig
def _init_weights(self, module):
pass
def __init__(self, config):
super().__init__(config)
self.base = BaseModel(config)
self.decoder = nn.Linear(5, 5)
def forward(self, x):
return self.decoder(self.base(x))
def tie_weights(self):
self.decoder.weight = self.base.linear.weight
class Prepare4dCausalAttentionMaskModel(nn.Module):
def forward(self, inputs_embeds):
batch_size, seq_length, _ = inputs_embeds.shape
past_key_values_length = 4
attention_mask = _prepare_4d_causal_attention_mask(
None, (batch_size, seq_length), inputs_embeds, past_key_values_length
)
return attention_mask
class Create4dCausalAttentionMaskModel(nn.Module):
def forward(self, inputs_embeds):
batch_size, seq_length, _ = inputs_embeds.shape
past_key_values_length = 4
attention_mask = _create_4d_causal_attention_mask(
(batch_size, seq_length),
dtype=inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
return attention_mask
class Prepare4dAttentionMaskModel(nn.Module):
def forward(self, mask, inputs_embeds):
attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype)
return attention_mask
if is_flax_available():
from transformers import FlaxBertModel
if is_tf_available():
from transformers import TFBertModel
TINY_T5 = "patrickvonplaten/t5-tiny-random"
TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification"
def check_models_equal(model1, model2):
models_are_equal = True
for model1_p, model2_p in zip(model1.parameters(), model2.parameters()):
if model1_p.data.ne(model2_p.data).sum() > 0:
models_are_equal = False
return models_are_equal
@require_torch
class ModelUtilsTest(TestCasePlus):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = BertConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
model = BertModel.from_pretrained(model_name)
model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, PreTrainedModel)
self.assertEqual(len(loading_info["missing_keys"]), 0)
self.assertEqual(len(loading_info["unexpected_keys"]), 8)
self.assertEqual(len(loading_info["mismatched_keys"]), 0)
self.assertEqual(len(loading_info["error_msgs"]), 0)
config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
# Not sure this is the intended behavior. TODO fix Lysandre & Thom
config.name_or_path = model_name
model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
self.assertEqual(model.config.output_hidden_states, True)
self.assertEqual(model.config, config)
def test_model_from_pretrained_subfolder(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
model = BertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder))
with self.assertRaises(OSError):
_ = BertModel.from_pretrained(tmp_dir)
model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_subfolder_sharded(self):
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
model = BertModel(config)
subfolder = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")
with self.assertRaises(OSError):
_ = BertModel.from_pretrained(tmp_dir)
model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder)
self.assertTrue(check_models_equal(model, model_loaded))
def test_model_from_pretrained_hub_subfolder(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(OSError):
_ = BertModel.from_pretrained(model_id)
model = BertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
def test_model_from_pretrained_hub_subfolder_sharded(self):
subfolder = "bert"
model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(OSError):
_ = BertModel.from_pretrained(model_id)
model = BertModel.from_pretrained(model_id, subfolder=subfolder)
self.assertIsNotNone(model)
def test_model_from_pretrained_with_different_pretrained_model_name(self):
model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
self.assertIsNotNone(model)
logger = logging.get_logger("transformers.configuration_utils")
with CaptureLogger(logger) as cl:
BertModel.from_pretrained(TINY_T5)
self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out)
def test_model_from_config_torch_dtype(self):
# test that the model can be instantiated with dtype of user's choice - as long as it's a
# float dtype. To make it happen config.torch_dtype needs to be set before instantiating the
# model from the config object.
config = T5Config.from_pretrained(TINY_T5)
model = AutoModel.from_config(config)
# XXX: isn't supported
# model = T5ForConditionalGeneration.from_config(config)
self.assertEqual(model.dtype, torch.float32)
model = AutoModel.from_config(config, torch_dtype=torch.float16)
self.assertEqual(model.dtype, torch.float16)
# torch.set_default_dtype() supports only float dtypes, so will fail with non-float type
with self.assertRaises(ValueError):
model = AutoModel.from_config(config, torch_dtype=torch.int64)
def test_model_from_pretrained_torch_dtype(self):
# test that the model can be instantiated with dtype of either
# 1. explicit from_pretrained's torch_dtype argument
# 2. via autodiscovery by looking at model weights (torch_dtype="auto")
# so if a model.half() was saved, we want it to be instantiated as such.
#
# test an explicit model class, but also AutoModel separately as the latter goes through a different code path
model_path = self.get_auto_remove_tmp_dir()
# baseline - we know TINY_T5 is an fp32 model
model = T5ForConditionalGeneration.from_pretrained(TINY_T5)
self.assertEqual(model.dtype, torch.float32)
def remove_torch_dtype(model_path):
file = f"{model_path}/config.json"
with open(file, "r", encoding="utf-8") as f:
s = json.load(f)
s.pop("torch_dtype")
with open(file, "w", encoding="utf-8") as f:
json.dump(s, f)
# test the default fp32 save_pretrained => from_pretrained cycle
model.save_pretrained(model_path)
model = T5ForConditionalGeneration.from_pretrained(model_path)
self.assertEqual(model.dtype, torch.float32)
# 1. test torch_dtype="auto" via `config.torch_dtype`
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto")
self.assertEqual(model.dtype, torch.float32)
# 2. test torch_dtype="auto" via auto-derivation
# now remove the torch_dtype entry from config.json and try "auto" again which should
# perform auto-derivation from weights
remove_torch_dtype(model_path)
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto")
self.assertEqual(model.dtype, torch.float32)
# test forced loading in fp16 (even though the weights are in fp32)
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16)
self.assertEqual(model.dtype, torch.float16)
# test fp16 save_pretrained, loaded with auto-detection
model = model.half()
model.save_pretrained(model_path)
# 1. test torch_dtype="auto" via `config.torch_dtype`
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto")
self.assertEqual(model.config.torch_dtype, torch.float16)
self.assertEqual(model.dtype, torch.float16)
# tests `config.torch_dtype` saving
with open(f"{model_path}/config.json") as f:
config_dict = json.load(f)
self.assertEqual(config_dict["torch_dtype"], "float16")
# 2. test torch_dtype="auto" via auto-derivation
# now same with using config info
remove_torch_dtype(model_path)
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto")
self.assertEqual(model.dtype, torch.float16)
# 3. now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration
model = AutoModel.from_pretrained(model_path, torch_dtype="auto")
self.assertEqual(model.dtype, torch.float16)
# test fp16 save_pretrained, loaded with the explicit fp16
model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16)
self.assertEqual(model.dtype, torch.float16)
# test AutoModel separately as it goes through a different path
# test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry
model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto")
# test that the config object didn't get polluted with torch_dtype="auto"
# there was a bug that after this call we ended up with config.torch_dtype=="auto"
self.assertNotEqual(model.config.torch_dtype, "auto")
# now test the outcome
self.assertEqual(model.dtype, torch.float32)
model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16)
self.assertEqual(model.dtype, torch.float16)
# test model whose first param is not of a floating type, but int
model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto")
self.assertEqual(model.dtype, torch.float32)
def test_no_super_init_config_and_model(self):
config = NoSuperInitConfig(attribute=32)
model = NoSuperInitModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = NoSuperInitModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
def test_shard_checkpoint(self):
# This is the model we will use, total size 340,000 bytes.
model = torch.nn.Sequential(
torch.nn.Linear(100, 200, bias=False), # size 80,000
torch.nn.Linear(200, 200, bias=False), # size 160,000
torch.nn.Linear(200, 100, bias=False), # size 80,000
torch.nn.Linear(100, 50, bias=False), # size 20,000
)
state_dict = model.state_dict()
with self.subTest("No shard when max size is bigger than model size"):
shards, index = shard_checkpoint(state_dict)
self.assertIsNone(index)
self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict})
with self.subTest("Test sharding, no weights bigger than max size"):
shards, index = shard_checkpoint(state_dict, max_shard_size="300kB")
# Split is first two layers then last two.
self.assertDictEqual(
index,
{
"metadata": {"total_size": 340000},
"weight_map": {
"0.weight": "pytorch_model-00001-of-00002.bin",
"1.weight": "pytorch_model-00001-of-00002.bin",
"2.weight": "pytorch_model-00002-of-00002.bin",
"3.weight": "pytorch_model-00002-of-00002.bin",
},
},
)
shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]}
shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]}
self.assertDictEqual(
shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2}
)
with self.subTest("Test sharding with weights bigger than max size"):
shards, index = shard_checkpoint(state_dict, max_shard_size="100kB")
# Split is first layer, second layer then last 2.
self.assertDictEqual(
index,
{
"metadata": {"total_size": 340000},
"weight_map": {
"0.weight": "pytorch_model-00001-of-00003.bin",
"1.weight": "pytorch_model-00002-of-00003.bin",
"2.weight": "pytorch_model-00003-of-00003.bin",
"3.weight": "pytorch_model-00003-of-00003.bin",
},
},
)
shard1 = {"0.weight": state_dict["0.weight"]}
shard2 = {"1.weight": state_dict["1.weight"]}
shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]}
self.assertDictEqual(
shards,
{
"pytorch_model-00001-of-00003.bin": shard1,
"pytorch_model-00002-of-00003.bin": shard2,
"pytorch_model-00003-of-00003.bin": shard3,
},
)
def test_checkpoint_sharding_local_bin(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
# We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False)
# Get each shard file and its size
shard_to_size = {}
for shard in os.listdir(tmp_dir):
if shard.endswith(".bin"):
shard_file = os.path.join(tmp_dir, shard)
shard_to_size[shard_file] = os.path.getsize(shard_file)
index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)
# Check there is an index but no regular weight file
self.assertTrue(os.path.isfile(index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
# Check a file is bigger than max_size only when it has a single weight
for shard_file, size in shard_to_size.items():
if max_size.endswith("kiB"):
max_size_int = int(max_size[:-3]) * 2**10
else:
max_size_int = int(max_size[:-2]) * 10**3
# Note: pickle adds some overhead, so the file can end up slightly bigger than
# the size asked for (since we only count parameter sizes)
if size >= max_size_int + 50000:
state_dict = torch.load(shard_file)
self.assertEqual(len(state_dict), 1)
# Check the index and the shard files found match
with open(index_file, "r", encoding="utf-8") as f:
index = json.loads(f.read())
all_shards = set(index["weight_map"].values())
shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")}
self.assertSetEqual(all_shards, shards_found)
# Finally, check the model can be reloaded
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
def test_checkpoint_sharding_from_hub(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
for p1, p2 in zip(model.parameters(), ref_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
def test_checkpoint_variant_local_bin(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False)
weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])
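# With WEIGHTS_NAME == "pytorch_model.bin", this yields "pytorch_model.v2.bin".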
weights_file = os.path.join(tmp_dir, weights_name)
self.assertTrue(os.path.isfile(weights_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(tmp_dir)
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
def test_checkpoint_variant_local_sharded_bin(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False)
weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"])
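# With WEIGHTS_INDEX_NAME == "pytorch_model.bin.index.json", this yields "pytorch_model.bin.index.v2.json".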
weights_index_file = os.path.join(tmp_dir, weights_index_name)
self.assertTrue(os.path.isfile(weights_index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)))
for i in range(1, 5):
weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"])
weights_name_file = os.path.join(tmp_dir, weights_name)
self.assertTrue(os.path.isfile(weights_name_file))
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(tmp_dir)
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
@require_safetensors
def test_checkpoint_variant_local_safe(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True)
weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"])
weights_file = os.path.join(tmp_dir, weights_name)
self.assertTrue(os.path.isfile(weights_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(tmp_dir)
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
@require_safetensors
def test_checkpoint_variant_local_sharded_safe(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True)
weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"])
weights_index_file = os.path.join(tmp_dir, weights_index_name)
self.assertTrue(os.path.isfile(weights_index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))
for i in range(1, 5):
weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"])
weights_name_file = os.path.join(tmp_dir, weights_name)
self.assertTrue(os.path.isfile(weights_name_file))
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(tmp_dir)
new_model = BertModel.from_pretrained(tmp_dir, variant="v2")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
def test_checkpoint_variant_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir)
model = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2"
)
self.assertIsNotNone(model)
def test_checkpoint_variant_hub_sharded(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir
)
model = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2"
)
self.assertIsNotNone(model)
@require_safetensors
def test_checkpoint_variant_hub_safe(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir)
model = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2"
)
self.assertIsNotNone(model)
@require_safetensors
def test_checkpoint_variant_hub_sharded_safe(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(EnvironmentError):
_ = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir
)
model = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2"
)
self.assertIsNotNone(model)
def test_checkpoint_variant_save_load_bin(self):
with tempfile.TemporaryDirectory() as tmp_dir:
model = BertModel.from_pretrained(
"hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2"
)
weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])
model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False)
# saving will create a variant checkpoint
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name)))
model.save_pretrained(tmp_dir, safe_serialization=False)
# saving shouldn't delete variant checkpoints
weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"])
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name)))
# there should be a normal checkpoint
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
self.assertIsNotNone(model)
@require_accelerate
@mark.accelerate_tests
def test_from_pretrained_low_cpu_mem_usage_functional(self):
# test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and
# sharded models
mnames = [
"hf-internal-testing/tiny-random-bert-sharded",
"hf-internal-testing/tiny-random-bert",
]
for mname in mnames:
_ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True)
@require_usr_bin_time
@require_accelerate
@mark.accelerate_tests
def test_from_pretrained_low_cpu_mem_usage_measured(self):
# test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default
mname = "bert-base-cased"
preamble = "from transformers import AutoModel"
one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)'
max_rss_normal = self.python_one_liner_max_rss(one_liner_str)
# print(f"{max_rss_normal=}")
one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)'
max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str)
# print(f"{max_rss_low_mem=}")
diff_bytes = max_rss_normal - max_rss_low_mem
diff_percent = diff_bytes / max_rss_low_mem
# print(f"{diff_bytes=}, {diff_percent=}")
# ideally we would compare that the diff is close to ~1x checkpoint size in bytes, but
# measuring cpu memory on linux is very tricky and inconsistent, so instead let's check that
# it's at least 15% less cpu memory consumed
self.assertGreater(
diff_percent,
0.15,
"should use less CPU memory for low_cpu_mem_usage=True, "
f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}",
)
# if you want to compare things manually, let's first look at the size of the model in bytes
# model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False)
# total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
# total_bytes = total_numel * 4 # 420MB
# Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent.
# The easiest way to test this is to switch the model and torch.load to do all the work on
# gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add
# functionality to load models directly on gpu, this test can be rewritten to use torch's
# cuda memory tracking and then we should be able to do a much more precise test.
@require_accelerate
@mark.accelerate_tests
@require_torch_multi_accelerator
@slow
def test_model_parallelism_gpt2(self):
device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1}
for i in range(12):
device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1
model = AutoModelForCausalLM.from_pretrained("gpt2", device_map=device_map)
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
output = model.generate(inputs["input_ids"].to(0))
text_output = tokenizer.decode(output[0].tolist())
self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. I'm")
@require_accelerate
@mark.accelerate_tests
@require_torch_accelerator
def test_from_pretrained_disk_offload_task_model(self):
model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
device_map = {
"transformer.wte": 0,
"transformer.wpe": 0,
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": "cpu",
"transformer.h.3": "disk",
"transformer.h.4": "disk",
"transformer.ln_f": 0,
"lm_head": 0,
}
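# Mixed placement: embeddings, final layer norm and LM head on device 0, blocks 0-2 on CPU,
# and blocks 3-4 offloaded to disk via `offload_folder`.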
with tempfile.TemporaryDirectory() as tmp_dir:
inputs = torch.tensor([[1, 2, 3]]).to(0)
model.save_pretrained(tmp_dir)
new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0)
outputs1 = new_model.to(0)(inputs)
offload_folder = os.path.join(tmp_dir, "offload")
new_model_with_offload = AutoModelForCausalLM.from_pretrained(
tmp_dir, device_map=device_map, offload_folder=offload_folder
)
outputs2 = new_model_with_offload(inputs)
self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))
# With state dict temp offload
offload_folder = os.path.join(tmp_dir, "offload")
new_model_with_offload = AutoModelForCausalLM.from_pretrained(
tmp_dir,
device_map=device_map,
offload_folder=offload_folder,
offload_state_dict=True,
)
outputs2 = new_model_with_offload(inputs)
self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))
@require_accelerate
@mark.accelerate_tests
@require_torch_accelerator
def test_from_pretrained_disk_offload_derived_to_base_model(self):
derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
device_map = {
"wte": 0,
"wpe": 0,
"h.0": "cpu",
"h.1": "cpu",
"h.2": "cpu",
"h.3": "disk",
"h.4": "disk",
"ln_f": 0,
}
with tempfile.TemporaryDirectory() as tmp_dir:
inputs = torch.tensor([[1, 2, 3]]).to(0)
derived_model.save_pretrained(tmp_dir, use_safetensors=True)
base_model = AutoModel.from_pretrained(tmp_dir)
outputs1 = base_model.to(0)(inputs)
# with disk offload
offload_folder = os.path.join(tmp_dir, "offload")
base_model_with_offload = AutoModel.from_pretrained(
tmp_dir, device_map=device_map, offload_folder=offload_folder
)
outputs2 = base_model_with_offload(inputs)
self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu()))
# With state dict temp offload
new_model_with_offload = AutoModel.from_pretrained(
tmp_dir,
device_map=device_map,
offload_folder=offload_folder,
offload_state_dict=True,
)
outputs2 = new_model_with_offload(inputs)
self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu()))
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# This checks that we did call the fake head request
mock_head.assert_called()
def test_load_from_one_file(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f
)
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
_ = BertModel.from_pretrained(tmp_file, config=config)
finally:
os.remove(tmp_file)
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
_ = BertModel.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config
)
@require_safetensors
def test_use_safetensors(self):
# Should not raise anymore
AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True)
# test that an error is raised if only the safetensors weights are available and `use_safetensors=False`
with self.assertRaises(OSError) as env_error:
BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False)
self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception))
# test that only the PyTorch weights are downloaded when both formats are available and `use_safetensors=False`
with tempfile.TemporaryDirectory() as tmp_dir:
CLIPTextModel.from_pretrained(
"hf-internal-testing/diffusers-stable-diffusion-tiny-all",
subfolder="text_encoder",
use_safetensors=False,
cache_dir=tmp_dir,
)
all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*"))
self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files))
self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files))
# test that only the safetensors weights are downloaded when both formats are available and `use_safetensors=True`
with tempfile.TemporaryDirectory() as tmp_dir:
CLIPTextModel.from_pretrained(
"hf-internal-testing/diffusers-stable-diffusion-tiny-all",
subfolder="text_encoder",
use_safetensors=True,
cache_dir=tmp_dir,
)
all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*"))
self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files))
self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files))
@require_safetensors
def test_safetensors_save_and_load(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
# No pytorch_model.bin file, only a model.safetensors
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
new_model = BertModel.from_pretrained(tmp_dir)
# Check models are equal
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
@require_safetensors
def test_safetensors_load_from_hub(self):
safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors")
pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Check models are equal
for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
@require_safetensors
def test_safetensors_save_and_load_sharded(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB")
# No pytorch_model.bin index file, only a model.safetensors index
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)))
self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))
# No regular weights file
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
new_model = BertModel.from_pretrained(tmp_dir)
# Check models are equal
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
@require_safetensors
def test_safetensors_load_from_hub_sharded(self):
safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors")
pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")
# Check models are equal
for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
def test_base_model_to_head_model_load(self):
base_model = BaseModel(PretrainedConfig())
with tempfile.TemporaryDirectory() as tmp_dir:
base_model.save_pretrained(tmp_dir, safe_serialization=False)
# Can load a base model in a model with head
model = ModelWithHead.from_pretrained(tmp_dir)
for p1, p2 in zip(model.base.parameters(), base_model.parameters()):
self.assertTrue(torch.allclose(p1, p2))
# Loading fails if the state dict mixes head keys and base keys without the base-model prefix though.
base_state_dict = base_model.state_dict()
head_state_dict = model.state_dict()
base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"]
base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"]
safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"})
with self.assertRaisesRegex(
ValueError, "The state dictionary of the model you are trying to load is corrupted."
):
_ = ModelWithHead.from_pretrained(tmp_dir)
def test_tied_weights_reload(self):
# Base
model = BaseModelWithTiedWeights(PretrainedConfig())
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir)
self.assertIs(new_model.linear.weight, new_model.linear_2.weight)
state_dict = model.state_dict()
# Remove the tied weight from the state_dict -> the model should load without complaining about missing keys
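# (tied parameters share a single tensor, so the loader can presumably re-tie `linear_2.weight` to
# `linear.weight` after loading and not report it as missing)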
del state_dict["linear_2.weight"]
torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME))
new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True)
self.assertListEqual(load_info["missing_keys"], [])
self.assertIs(new_model.linear.weight, new_model.linear_2.weight)
# With head
model.save_pretrained(tmp_dir)
new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True)
self.assertIs(new_model.base.linear.weight, new_model.decoder.weight)
# Should only complain about the missing bias
self.assertListEqual(load_info["missing_keys"], ["decoder.bias"])
def test_unexpected_keys_warnings(self):
model = ModelWithHead(PretrainedConfig())
logger = logging.get_logger("transformers.modeling_utils")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
# Loading the model with a new class, we don't get a warning for unexpected weights, just an info
with CaptureLogger(logger) as cl:
_, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True)
self.assertNotIn("were not used when initializing ModelWithHead", cl.out)
self.assertEqual(
set(loading_info["unexpected_keys"]),
{"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"},
)
# Loading the model with the same class, we do get a warning for unexpected weights
state_dict = model.state_dict()
state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"])
safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"})
with CaptureLogger(logger) as cl:
_, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True)
self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out)
self.assertEqual(loading_info["unexpected_keys"], ["added_key"])
def test_warn_if_padding_and_no_attention_mask(self):
logger = logging.get_logger("transformers.modeling_utils")
with self.subTest("Ensure no warnings when pad_token_id is None."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config_no_pad_token = PretrainedConfig()
config_no_pad_token.pad_token_id = None
model = ModelWithHead(config_no_pad_token)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)
with self.subTest("Ensure no warnings when there is an attention_mask."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)
with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out)
with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)
with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)
with self.subTest("Ensure that the warning is shown at most once."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1)
with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."):
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
config = PretrainedConfig()
config.pad_token_id = 0
config.bos_token_id = config.pad_token_id
model = ModelWithHead(config)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]])
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out)
if not is_torchdynamo_available():
return
with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."):
logger.warning_once.cache_clear()
from torch._dynamo import config, testing
config = PretrainedConfig()
config.pad_token_id = 0
model = ModelWithHead(config)
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]])
def f(input_ids):
model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None)
compile_counter = testing.CompileCounter()
opt_fn = torch.compile(f, dynamic=True, backend=compile_counter)
opt_fn(input_ids)
self.assertEqual(compile_counter.frame_count, 0)
@require_torch_accelerator
@slow
def test_pretrained_low_mem_new_config(self):
# Check a single model (the same one described in the original issue).
model_ids = ["gpt2"]
for model_id in model_ids:
model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id)
model_config.n_layer = 48
model_config.n_head = 25
model_config.n_embd = 1600
model = AutoModelForCausalLM.from_pretrained(
pretrained_model_name_or_path=model_id,
config=model_config,
ignore_mismatched_sizes=True,
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
)
model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id)
self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__)
def test_generation_config_is_loaded_with_model(self):
# Note: `joaogante/tiny-random-gpt2-with-generation-config` has a `generation_config.json` containing a dummy
# `transformers_version` field set to `foo`. If loading the file fails, this test also fails.
# 1. Load without further parameters
model = AutoModelForCausalLM.from_pretrained("joaogante/tiny-random-gpt2-with-generation-config")
self.assertEqual(model.generation_config.transformers_version, "foo")
# 2. Load with `device_map`
model = AutoModelForCausalLM.from_pretrained(
"joaogante/tiny-random-gpt2-with-generation-config", device_map="auto"
)
self.assertEqual(model.generation_config.transformers_version, "foo")
@require_safetensors
def test_safetensors_torch_from_torch(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
@require_safetensors
@require_flax
def test_safetensors_torch_from_flax(self):
hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(hub_model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
@require_tf
@require_safetensors
def test_safetensors_torch_from_tf(self):
hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True)
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(hub_model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
@require_safetensors
def test_safetensors_torch_from_torch_sharded(self):
model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB")
new_model = BertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
@slow
@require_torch
class ModelOnTheFlyConversionTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.user = "huggingface-hub-ci"
cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None)
if cls.token is None:
raise ValueError("Cannot run tests as secret isn't setup.")
cls.api = HfApi(token=cls.token)
def setUp(self) -> None:
self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}"
def tearDown(self) -> None:
self.api.delete_repo(self.repo_name)
def test_safetensors_on_the_fly_conversion(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False)
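# Loading with `use_safetensors=True` from a repo that only has `pytorch_model.bin` is expected to trigger
# the on-the-fly safetensors conversion: the converted weights are used for loading and, as checked below,
# a PR adding the `safetensors` variant is opened on the repo.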
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, "SFconvertbot")
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_conversion_private(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True)
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name, token=self.token)
discussion = next(discussions)
self.assertEqual(discussion.author, self.user)
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_conversion_gated(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False)
headers = {"Authorization": f"Bearer {self.token}"}
requests.put(
f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers
)
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, "SFconvertbot")
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_sharded_conversion(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb")
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, "SFconvertbot")
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_sharded_conversion_private(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(
self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True
)
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, self.user)
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_sharded_conversion_gated(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False)
headers = {"Authorization": f"Bearer {self.token}"}
requests.put(
f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers
)
converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
with self.subTest("Initial and converted models are equal"):
for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, "SFconvertbot")
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
@unittest.skip("Edge case, should work once the Space is updated`")
def test_safetensors_on_the_fly_wrong_user_opened_pr(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True)
BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
# This should have opened a PR with the user's account
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
discussion = next(discussions)
self.assertEqual(discussion.author, self.user)
self.assertEqual(discussion.title, "Adding `safetensors` variant of this model")
# We now switch the repo visibility to public
self.api.update_repo_visibility(self.repo_name, private=False)
# We once again call from_pretrained, which should call the bot to open a PR
BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token)
with self.subTest("PR was open with the safetensors account"):
discussions = self.api.get_repo_discussions(self.repo_name)
bot_opened_pr = None
bot_opened_pr_title = None
for discussion in discussions:
if discussion.author == "SFconvertbot":
bot_opened_pr = True
bot_opened_pr_title = discussion.title
self.assertTrue(bot_opened_pr)
self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model")
def test_safetensors_on_the_fly_specific_revision(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
initial_model = BertModel(config)
# Push a model on `main`
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False)
# Push a model on a given revision
initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch")
# Try to convert the model on that revision should raise
with self.assertRaises(EnvironmentError):
BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch")
@require_torch
@is_staging_test
class ModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-model")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-model-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-model")
except HTTPError:
pass
@unittest.skip("This test is flaky")
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = BertModel(config)
model.push_to_hub("test-model", token=self._token)
new_model = BertModel.from_pretrained(f"{USER}/test-model")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Reset repo
delete_repo(token=self._token, repo_id="test-model")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, token=self._token)
new_model = BertModel.from_pretrained(f"{USER}/test-model")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
def test_push_to_hub_with_description(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = BertModel(config)
COMMIT_DESCRIPTION = """
The commit description supports markdown syntax, see:
```python
>>> from transformers import AutoConfig
>>> config = AutoConfig.from_pretrained("bert-base-uncased")
```
"""
commit_details = model.push_to_hub(
"test-model", use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION
)
self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION)
@unittest.skip("This test is flaky")
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = BertModel(config)
model.push_to_hub("valid_org/test-model-org", token=self._token)
new_model = BertModel.from_pretrained("valid_org/test-model-org")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-model-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-org")
new_model = BertModel.from_pretrained("valid_org/test-model-org")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
def test_push_to_hub_dynamic_model(self):
CustomConfig.register_for_auto_class()
CustomModel.register_for_auto_class()
config = CustomConfig(hidden_size=32)
model = CustomModel(config)
model.push_to_hub("test-dynamic-model", token=self._token)
# checks
self.assertDictEqual(
config.auto_map,
{"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"},
)
new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True)
# Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module
self.assertEqual(new_model.__class__.__name__, "CustomModel")
for p1, p2 in zip(model.parameters(), new_model.parameters()):
self.assertTrue(torch.equal(p1, p2))
config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True)
new_model = AutoModel.from_config(config, trust_remote_code=True)
self.assertEqual(new_model.__class__.__name__, "CustomModel")
@require_torch
class AttentionMaskTester(unittest.TestCase):
def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d):
mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len))
mask_4d_values = mask_4d[:, 0][mask_indices]
is_inf = mask_4d_values == -float("inf")
is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min
assert torch.logical_or(is_inf, is_min).all()
def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3):
mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long)
if additional_mask is not None:
for bsz_idx, seq_idx in additional_mask:
mask_2d[bsz_idx, seq_idx] = 0
mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32)
assert mask_4d.shape == (bsz, 1, q_len, kv_len)
# make sure there are no overflows
assert mask_4d.min() != float("-inf")
context = mask_converter.sliding_window
if mask_converter.is_causal and context is None:
# q_len * (q_len - 1) / 2 tokens are masked in triangular causal masks
num_tokens_masked = bsz * (q_len * (q_len - 1) // 2)
if 0 not in mask_2d:
assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked
if 0 in mask_2d:
# at least causal mask + maybe more
assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked
self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)
elif not mask_converter.is_causal and context is None:
if 0 not in mask_2d:
assert (mask_4d != 0).sum().cpu().item() == 0
if 0 in mask_2d:
self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)
elif mask_converter.is_causal and context is not None:
# q_len * (q_len - 1) / 2 tokens are masked by the triangular causal mask, plus the sliding-window contribution
num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len)
num_tokens_masked = bsz * num_tokens_masked
if 0 not in mask_2d:
assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked
if 0 in mask_2d:
# at least causal mask + maybe more
assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked
self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d)
def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3):
mask_4d = mask_converter.to_causal_4d(
bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32
)
if q_len == 1 and mask_converter.sliding_window is None:
# no causal mask if q_len is 1
assert mask_4d is None
return
context = mask_converter.sliding_window
if mask_converter.is_causal and context is None:
# q_len * (q_len - 1) / 2 tokens are masked in triangular causal masks
num_tokens_masked = bsz * (q_len * (q_len - 1) // 2)
assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked
elif not mask_converter.is_causal and context is None:
assert (mask_4d != 0).sum().cpu().item() == 0
elif mask_converter.is_causal and context is not None:
# q_len * (q_len - 1) / 2 tokens are masked by the triangular causal mask, plus the sliding-window contribution
num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len)
num_tokens_masked = bsz * num_tokens_masked
assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked
def compute_num_context_mask(self, kv_len, context, q_len):
# Computes the number of additional attention positions that get masked
# because of the sliding window
c_mask_len = kv_len - context
num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2
cut_mask_len = max(c_mask_len - q_len, 0)
num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2
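# Worked example of this computation: with kv_len=7, context=3, q_len=7 we get c_mask_len = 7 - 3 = 4,
# num_mask_triangle = 4 * 5 // 2 = 10, cut_mask_len = max(4 - 7, 0) = 0 and num_cut_mask = 0, so the
# sliding window masks 10 extra positions per batch element.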
return num_mask_triangle - num_cut_mask
def test_2d_to_4d_causal(self):
mask_converter = AttentionMaskConverter(is_causal=True)
# auto-regressive use case
self.check_to_4d(mask_converter, q_len=1, kv_len=7)
# special auto-regressive case
self.check_to_4d(mask_converter, q_len=3, kv_len=7)
# non auto-regressive case
self.check_to_4d(mask_converter, q_len=7, kv_len=7)
# same with extra attention masks
self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
# check that the mask does not overflow on causal masked tokens
self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)])
def test_2d_to_4d(self):
mask_converter = AttentionMaskConverter(is_causal=False)
# non auto-regressive case
self.check_to_4d(mask_converter, q_len=7, kv_len=7)
# same with extra attention masks
self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
def test_2d_to_4d_causal_sliding(self):
mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5)
# auto-regressive use case
self.check_to_4d(mask_converter, q_len=1, kv_len=7)
# special auto-regressive case
self.check_to_4d(mask_converter, q_len=3, kv_len=7)
# non auto-regressive case
self.check_to_4d(mask_converter, q_len=7, kv_len=7)
# same with extra attention masks
self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)])
def test_causal_mask(self):
mask_converter = AttentionMaskConverter(is_causal=True)
# auto-regressive use case
self.check_to_causal(mask_converter, q_len=1, kv_len=7)
# special auto-regressive case
self.check_to_causal(mask_converter, q_len=3, kv_len=7)
# non auto-regressive case
self.check_to_causal(mask_converter, q_len=7, kv_len=7)
def test_causal_mask_sliding(self):
mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3)
# auto-regressive use case
self.check_to_causal(mask_converter, q_len=1, kv_len=7)
# special auto-regressive case
self.check_to_causal(mask_converter, q_len=3, kv_len=7)
# non auto-regressive case
self.check_to_causal(mask_converter, q_len=7, kv_len=7)
def test_torch_compile_fullgraph(self):
model = Prepare4dCausalAttentionMaskModel()
inputs_embeds = torch.rand([1, 3, 32])
res_non_compiled = model(inputs_embeds)
compiled_model = torch.compile(model, fullgraph=True)
res_compiled = compiled_model(inputs_embeds)
self.assertTrue(torch.equal(res_non_compiled, res_compiled))
model = Create4dCausalAttentionMaskModel()
inputs_embeds = torch.rand(2, 4, 16)
res_non_compiled = model(inputs_embeds)
compiled_model = torch.compile(model, fullgraph=True)
res_compiled = compiled_model(inputs_embeds)
self.assertTrue(torch.equal(res_non_compiled, res_compiled))
model = Prepare4dAttentionMaskModel()
mask = torch.ones(2, 4)
mask[0, :2] = 0
inputs_embeds = torch.rand(2, 4, 16)
res_non_compiled = model(mask, inputs_embeds)
compiled_model = torch.compile(model, fullgraph=True)
res_compiled = compiled_model(mask, inputs_embeds)
self.assertTrue(torch.equal(res_non_compiled, res_compiled))
@require_torch
@slow
def test_unmask_unattended_left_padding(self):
attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64)
expanded_mask = torch.Tensor(
[
[[[0, 0, 0], [0, 0, 0], [0, 0, 1]]],
[[[1, 0, 0], [1, 1, 0], [1, 1, 1]]],
[[[0, 0, 0], [0, 1, 0], [0, 1, 1]]],
]
).to(torch.int64)
reference_output = torch.Tensor(
[
[[[1, 1, 1], [1, 1, 1], [0, 0, 1]]],
[[[1, 0, 0], [1, 1, 0], [1, 1, 1]]],
[[[1, 1, 1], [0, 1, 0], [0, 1, 1]]],
]
).to(torch.int64)
result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1)
self.assertTrue(torch.equal(result, reference_output))
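# `_unmask_unattended` rewrites rows of the expanded mask that cannot attend to any position (here the
# rows belonging to left-padding tokens); this is typically done to avoid NaNs from fully-masked rows in
# SDPA-style attention kernels, which is why those rows come back fully "unmasked" above.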
attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64)
attn_mask_converter = AttentionMaskConverter(is_causal=True)
past_key_values_length = 0
key_value_length = attention_mask.shape[-1] + past_key_values_length
expanded_mask = attn_mask_converter.to_4d(
attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
)
result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)
min_inf = torch.finfo(torch.float32).min
reference_output = torch.Tensor(
[
[
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[min_inf, min_inf, 0, min_inf, min_inf],
[min_inf, min_inf, 0, 0, min_inf],
[min_inf, min_inf, 0, 0, 0],
]
],
[
[
[0, min_inf, min_inf, min_inf, min_inf],
[0, 0, min_inf, min_inf, min_inf],
[0, 0, 0, min_inf, min_inf],
[0, 0, 0, 0, min_inf],
[0, 0, 0, 0, 0],
]
],
[
[
[0, 0, 0, 0, 0],
[min_inf, 0, min_inf, min_inf, min_inf],
[min_inf, 0, 0, min_inf, min_inf],
[min_inf, 0, 0, 0, min_inf],
[min_inf, 0, 0, 0, 0],
]
],
]
)
self.assertTrue(torch.equal(reference_output, result))
@require_torch
@slow
def test_unmask_unattended_right_padding(self):
attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64)
attn_mask_converter = AttentionMaskConverter(is_causal=True)
past_key_values_length = 0
key_value_length = attention_mask.shape[-1] + past_key_values_length
expanded_mask = attn_mask_converter.to_4d(
attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
)
result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)
self.assertTrue(torch.equal(expanded_mask, result))
@require_torch
@slow
def test_unmask_unattended_random_mask(self):
attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64)
attn_mask_converter = AttentionMaskConverter(is_causal=True)
past_key_values_length = 0
key_value_length = attention_mask.shape[-1] + past_key_values_length
expanded_mask = attn_mask_converter.to_4d(
attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32
)
result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0)
self.assertTrue(torch.equal(expanded_mask, result))
@require_torch
class TestAttentionImplementation(unittest.TestCase):
def test_error_no_sdpa_available(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa")
self.assertTrue(
"does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention"
in str(cm.exception)
)
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")
def test_error_no_flash_available(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained(
"hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2"
)
self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))
def test_error_wrong_attn_implementation(self):
with self.assertRaises(ValueError) as cm:
_ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo")
self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception))
def test_not_available_flash(self):
if is_flash_attn_2_available():
self.skipTest("Please uninstall flash-attn package to run test_not_available_flash")
with self.assertRaises(ImportError) as cm:
_ = AutoModel.from_pretrained(
"hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2"
)
self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))
def test_not_available_sdpa(self):
if is_torch_sdpa_available():
self.skipTest("This test requires torch<=2.0")
with self.assertRaises(ImportError) as cm:
_ = AutoModel.from_pretrained(
"hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="sdpa"
)
self.assertTrue("PyTorch SDPA requirements in Transformers are not met" in str(cm.exception))
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_pipeline_mixin.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import random
import unittest
from pathlib import Path
from transformers.testing_utils import (
is_pipeline_test,
require_decord,
require_pytesseract,
require_timm,
require_torch,
require_torch_or_tf,
require_vision,
)
from transformers.utils import direct_transformers_import, logging
from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests
from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests
from .pipelines.test_pipelines_conversational import ConversationalPipelineTests
from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests
from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests
from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests
from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests
from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests
from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests
from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests
from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests
from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests
from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests
from .pipelines.test_pipelines_question_answering import QAPipelineTests
from .pipelines.test_pipelines_summarization import SummarizationPipelineTests
from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests
from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests
from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests
from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests
from .pipelines.test_pipelines_text_to_audio import TextToAudioPipelineTests
from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests
from .pipelines.test_pipelines_translation import TranslationPipelineTests
from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests
from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests
from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests
pipeline_test_mapping = {
"audio-classification": {"test": AudioClassificationPipelineTests},
"automatic-speech-recognition": {"test": AutomaticSpeechRecognitionPipelineTests},
"conversational": {"test": ConversationalPipelineTests},
"depth-estimation": {"test": DepthEstimationPipelineTests},
"document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests},
"feature-extraction": {"test": FeatureExtractionPipelineTests},
"fill-mask": {"test": FillMaskPipelineTests},
"image-classification": {"test": ImageClassificationPipelineTests},
"image-segmentation": {"test": ImageSegmentationPipelineTests},
"image-to-image": {"test": ImageToImagePipelineTests},
"image-to-text": {"test": ImageToTextPipelineTests},
"mask-generation": {"test": MaskGenerationPipelineTests},
"object-detection": {"test": ObjectDetectionPipelineTests},
"question-answering": {"test": QAPipelineTests},
"summarization": {"test": SummarizationPipelineTests},
"table-question-answering": {"test": TQAPipelineTests},
"text2text-generation": {"test": Text2TextGenerationPipelineTests},
"text-classification": {"test": TextClassificationPipelineTests},
"text-generation": {"test": TextGenerationPipelineTests},
"text-to-audio": {"test": TextToAudioPipelineTests},
"token-classification": {"test": TokenClassificationPipelineTests},
"translation": {"test": TranslationPipelineTests},
"video-classification": {"test": VideoClassificationPipelineTests},
"visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests},
"zero-shot": {"test": ZeroShotClassificationPipelineTests},
"zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests},
"zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests},
"zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests},
}
for task, task_info in pipeline_test_mapping.items():
test = task_info["test"]
task_info["mapping"] = {
"pt": getattr(test, "model_mapping", None),
"tf": getattr(test, "tf_model_mapping", None),
}
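# After this loop each entry has roughly this shape (sketch only; the concrete mappings depend on the test class):
# pipeline_test_mapping["fill-mask"] == {"test": FillMaskPipelineTests, "mapping": {"pt": ..., "tf": ...}}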
# The default value `hf-internal-testing` is for running the pipeline tests against the tiny models on the Hub.
# For debugging purposes, we can specify a local path, which is the `output_path` argument of a previous run of
# `utils/create_dummy_models.py`.
TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing")
if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing":
TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json")
else:
TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json")
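# For example (hypothetical local path), running the tests with
# TRANSFORMERS_TINY_MODEL_PATH=/tmp/tiny_models python -m pytest tests/models/bert
# would make this read /tmp/tiny_models/reports/tiny_model_summary.json instead of the summary shipped in the repo.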
with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp:
tiny_model_summary = json.load(fp)
PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers")
# Dynamically import the Transformers module to grab the processor classes from their names.
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS)
logger = logging.get_logger(__name__)
class PipelineTesterMixin:
model_tester = None
pipeline_model_mapping = None
supported_frameworks = ["pt", "tf"]
def run_task_tests(self, task):
"""Run pipeline tests for a specific `task`
Args:
task (`str`):
A task name. This should be a key in the mapping `pipeline_test_mapping`.
"""
if task not in self.pipeline_model_mapping:
self.skipTest(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: `{task}` is not in "
f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`."
)
model_architectures = self.pipeline_model_mapping[task]
if not isinstance(model_architectures, tuple):
model_architectures = (model_architectures,)
if not isinstance(model_architectures, tuple):
raise ValueError(f"`model_architectures` must be a tuple. Got {type(model_architectures)} instead.")
for model_architecture in model_architectures:
model_arch_name = model_architecture.__name__
# Get the canonical name
for _prefix in ["Flax", "TF"]:
if model_arch_name.startswith(_prefix):
model_arch_name = model_arch_name[len(_prefix) :]
break
tokenizer_names = []
processor_names = []
commit = None
if model_arch_name in tiny_model_summary:
tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"]
processor_names = tiny_model_summary[model_arch_name]["processor_classes"]
if "sha" in tiny_model_summary[model_arch_name]:
commit = tiny_model_summary[model_arch_name]["sha"]
# Adding `None` (if empty) so we can generate tests
tokenizer_names = [None] if len(tokenizer_names) == 0 else tokenizer_names
processor_names = [None] if len(processor_names) == 0 else processor_names
repo_name = f"tiny-random-{model_arch_name}"
if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing":
repo_name = model_arch_name
self.run_model_pipeline_tests(
task, repo_name, model_architecture, tokenizer_names, processor_names, commit
)
def run_model_pipeline_tests(self, task, repo_name, model_architecture, tokenizer_names, processor_names, commit):
"""Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class names
Args:
task (`str`):
A task name. This should be a key in the mapping `pipeline_test_mapping`.
repo_name (`str`):
A model repository id on the Hub.
model_architecture (`type`):
A subclass of `PreTrainedModel` or `TFPreTrainedModel`.
tokenizer_names (`List[str]`):
A list of names of subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`.
processor_names (`List[str]`):
A list of names of subclasses of `BaseImageProcessor` or `FeatureExtractionMixin`.
"""
# Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and
# `run_pipeline_test`.
pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__
for tokenizer_name in tokenizer_names:
for processor_name in processor_names:
if self.is_pipeline_test_to_skip(
pipeline_test_class_name,
model_architecture.config_class,
model_architecture,
tokenizer_name,
processor_name,
):
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is "
f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer "
f"`{tokenizer_name}` | processor `{processor_name}`."
)
continue
self.run_pipeline_test(task, repo_name, model_architecture, tokenizer_name, processor_name, commit)
def run_pipeline_test(self, task, repo_name, model_architecture, tokenizer_name, processor_name, commit):
"""Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class name
The model will be loaded from a model repository on the Hub.
Args:
task (`str`):
A task name. This should be a key in the mapping `pipeline_test_mapping`.
repo_name (`str`):
A model repository id on the Hub.
model_architecture (`type`):
A subclass of `PreTrainedModel` or `TFPreTrainedModel`.
tokenizer_name (`str`):
The name of a subclass of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`.
processor_name (`str`):
The name of a subclass of `BaseImageProcessor` or `FeatureExtractionMixin`.
"""
repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}"
if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing":
model_type = model_architecture.config_class.model_type
repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name)
tokenizer = None
if tokenizer_name is not None:
tokenizer_class = getattr(transformers_module, tokenizer_name)
tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit)
processor = None
if processor_name is not None:
processor_class = getattr(transformers_module, processor_name)
# If the required packages (like `Pillow` or `torchaudio`) are not installed, this will fail.
try:
processor = processor_class.from_pretrained(repo_id, revision=commit)
except Exception:
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not load the "
f"processor from `{repo_id}` with `{processor_name}`."
)
return
# TODO: Maybe not upload such problematic tiny models to Hub.
if tokenizer is None and processor is None:
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load "
f"any tokenizer / processor from `{repo_id}`."
)
return
# TODO: We should check whether a model file is on the Hub repo instead.
try:
model = model_architecture.from_pretrained(repo_id, revision=commit)
except Exception:
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not find or load "
f"the model from `{repo_id}` with `{model_architecture}`."
)
return
pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__
if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, processor):
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: test is "
f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer "
f"`{tokenizer_name}` | processor `{processor_name}`."
)
return
# validate
validate_test_components(self, task, model, tokenizer, processor)
if hasattr(model, "eval"):
model = model.eval()
# Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and
# `run_pipeline_test`.
task_test = pipeline_test_mapping[task]["test"]()
pipeline, examples = task_test.get_test_pipeline(model, tokenizer, processor)
if pipeline is None:
# The test can disable itself, but it should be very marginal
# Concerns: Wav2Vec2ForCTC without tokenizer test (a fast tokenizer doesn't exist)
logger.warning(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')} is skipped: Could not get the "
"pipeline for testing."
)
return
task_test.run_pipeline_test(pipeline, examples)
def run_batch_test(pipeline, examples):
# Need to copy because `Conversation` objects are stateful
if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None:
return # No batching for this and it's OK
# 10 examples with batch size 4 means there needs to be an unfinished batch
# which is important for the unbatcher
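# (with 10 examples and batch_size=4 the batches are 4 + 4 + 2, so the final partial batch exercises it)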
def data(n):
for _ in range(n):
# Need to copy because the Conversation object is mutated
yield copy.deepcopy(random.choice(examples))
out = []
if task == "conversational":
for item in pipeline(data(10), batch_size=4, max_new_tokens=5):
out.append(item)
else:
for item in pipeline(data(10), batch_size=4):
out.append(item)
self.assertEqual(len(out), 10)
run_batch_test(pipeline, examples)
@is_pipeline_test
def test_pipeline_audio_classification(self):
self.run_task_tests(task="audio-classification")
@is_pipeline_test
def test_pipeline_automatic_speech_recognition(self):
self.run_task_tests(task="automatic-speech-recognition")
@is_pipeline_test
def test_pipeline_conversational(self):
self.run_task_tests(task="conversational")
@is_pipeline_test
@require_vision
@require_timm
@require_torch
def test_pipeline_depth_estimation(self):
self.run_task_tests(task="depth-estimation")
@is_pipeline_test
@require_pytesseract
@require_torch
@require_vision
def test_pipeline_document_question_answering(self):
self.run_task_tests(task="document-question-answering")
@is_pipeline_test
def test_pipeline_feature_extraction(self):
self.run_task_tests(task="feature-extraction")
@is_pipeline_test
def test_pipeline_fill_mask(self):
self.run_task_tests(task="fill-mask")
@is_pipeline_test
@require_torch_or_tf
@require_vision
def test_pipeline_image_classification(self):
self.run_task_tests(task="image-classification")
@is_pipeline_test
@require_vision
@require_timm
@require_torch
def test_pipeline_image_segmentation(self):
self.run_task_tests(task="image-segmentation")
@is_pipeline_test
@require_vision
def test_pipeline_image_to_text(self):
self.run_task_tests(task="image-to-text")
@unittest.skip(reason="`run_pipeline_test` is currently not implemented.")
@is_pipeline_test
@require_vision
@require_torch
def test_pipeline_mask_generation(self):
self.run_task_tests(task="mask-generation")
@is_pipeline_test
@require_vision
@require_timm
@require_torch
def test_pipeline_object_detection(self):
self.run_task_tests(task="object-detection")
@is_pipeline_test
def test_pipeline_question_answering(self):
self.run_task_tests(task="question-answering")
@is_pipeline_test
def test_pipeline_summarization(self):
self.run_task_tests(task="summarization")
@is_pipeline_test
def test_pipeline_table_question_answering(self):
self.run_task_tests(task="table-question-answering")
@is_pipeline_test
def test_pipeline_text2text_generation(self):
self.run_task_tests(task="text2text-generation")
@is_pipeline_test
def test_pipeline_text_classification(self):
self.run_task_tests(task="text-classification")
@is_pipeline_test
@require_torch_or_tf
def test_pipeline_text_generation(self):
self.run_task_tests(task="text-generation")
@is_pipeline_test
@require_torch
def test_pipeline_text_to_audio(self):
self.run_task_tests(task="text-to-audio")
@is_pipeline_test
def test_pipeline_token_classification(self):
self.run_task_tests(task="token-classification")
@is_pipeline_test
def test_pipeline_translation(self):
self.run_task_tests(task="translation")
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
def test_pipeline_video_classification(self):
self.run_task_tests(task="video-classification")
@is_pipeline_test
@require_torch
@require_vision
def test_pipeline_visual_question_answering(self):
self.run_task_tests(task="visual-question-answering")
@is_pipeline_test
def test_pipeline_zero_shot(self):
self.run_task_tests(task="zero-shot")
@is_pipeline_test
@require_torch
def test_pipeline_zero_shot_audio_classification(self):
self.run_task_tests(task="zero-shot-audio-classification")
@is_pipeline_test
@require_vision
def test_pipeline_zero_shot_image_classification(self):
self.run_task_tests(task="zero-shot-image-classification")
@is_pipeline_test
@require_vision
@require_torch
def test_pipeline_zero_shot_object_detection(self):
self.run_task_tests(task="zero-shot-object-detection")
# This contains the test cases to be skipped without model architecture being involved.
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
"""Skip some tests based on the classes or their names without the instantiated objects.
This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail.
"""
# No fix is required for this case.
if (
pipeline_test_casse_name == "DocumentQuestionAnsweringPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer.
return True
return False
def is_pipeline_test_to_skip_more(self, pipeline_test_casse_name, config, model, tokenizer, processor): # noqa
"""Skip some more tests based on the information from the instantiated objects."""
# No fix is required for this case.
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer is not None
and getattr(tokenizer, "pad_token", None) is None
and not tokenizer.__class__.__name__.endswith("Fast")
):
# `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token.
return True
return False
def validate_test_components(test_case, task, model, tokenizer, processor):
# TODO: Move this to tiny model creation script
# head-specific (within a model type) necessary changes to the config
# 1. for `BlenderbotForCausalLM`
if model.__class__.__name__ == "BlenderbotForCausalLM":
model.config.encoder_no_repeat_ngram_size = 0
# TODO: Change the tiny model creation script: don't create models with problematic tokenizers
# Avoid `IndexError` in embedding layers
CONFIG_WITHOUT_VOCAB_SIZE = ["CanineConfig"]
if tokenizer is not None:
config_vocab_size = getattr(model.config, "vocab_size", None)
# For CLIP-like models
if config_vocab_size is None:
if hasattr(model.config, "text_config"):
config_vocab_size = getattr(model.config.text_config, "vocab_size", None)
elif hasattr(model.config, "text_encoder"):
config_vocab_size = getattr(model.config.text_encoder, "vocab_size", None)
if config_vocab_size is None and model.config.__class__.__name__ not in CONFIG_WITHOUT_VOCAB_SIZE:
raise ValueError(
"Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`."
)
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_image_processing_utils.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# This check we did call the fake head request
mock_head.assert_called()
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_ = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
)
def test_image_processor_from_pretrained_subfolder(self):
with self.assertRaises(OSError):
# config is in subfolder, the following should not work without specifying the subfolder
_ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
config = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
)
self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def test_push_to_hub(self):
image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("test-image-processor", token=self._token)
new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(v, getattr(new_image_processor, k))
# Reset repo
delete_repo(token=self._token, repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
tmp_dir, repo_id="test-image-processor", push_to_hub=True, token=self._token
)
new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(v, getattr(new_image_processor, k))
def test_push_to_hub_in_organization(self):
image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("valid_org/test-image-processor", token=self._token)
new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(v, getattr(new_image_processor, k))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, token=self._token
)
new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(v, getattr(new_image_processor, k))
def test_push_to_hub_dynamic_image_processor(self):
CustomImageProcessor.register_for_auto_class()
image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
image_processor.push_to_hub("test-dynamic-image-processor", token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map,
{"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
)
new_image_processor = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor", trust_remote_code=True
)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_modeling_flax_common.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import json
import random
import tempfile
from typing import List, Tuple
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import CaptureLogger, is_pt_flax_cross_test, require_flax, torch_device
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging
from transformers.utils.generic import ModelOutput
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import (
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForSequenceClassification,
FlaxBertModel,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return np.array(values, dtype=jnp.float32).reshape(shape)
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
def get_params(params, from_head_prefix=None):
"""Function extracts relevant parameters into flatten dict from model params,
appends batch normalization statistics if present"""
# If Both parameters and batch normalization statistics are present
if "batch_stats" in params:
# Extract only parameters for the specified head prefix (if specified) and add batch statistics
if from_head_prefix is not None:
extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix]))
extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix]))
else:
extracted_params = flatten_dict(unfreeze(params["params"]))
extracted_params.update(flatten_dict(params["batch_stats"]))
# Only parameters are present
else:
if from_head_prefix is not None:
extracted_params = flatten_dict(unfreeze(params[from_head_prefix]))
else:
extracted_params = flatten_dict(unfreeze(params))
return extracted_params
@require_flax
class FlaxModelTesterMixin:
model_tester = None
all_model_classes = ()
test_mismatched_shapes = True
is_encoder_decoder = False
test_head_masking = False
has_attentions = True
def _prepare_for_class(self, inputs_dict, model_class):
inputs_dict = copy.deepcopy(inputs_dict)
# hack for now until we have AutoModel classes
if "ForMultipleChoice" in model_class.__name__:
inputs_dict = {
k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1]))
if isinstance(v, (jnp.ndarray, np.ndarray)) and k != "indices_prng_key"
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
diff = np.abs((a - b)).max()
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
# (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs)
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
"""
Args:
model_class: The class of the model that is currently being tested. For example, ..., etc.
Currently unused, but it could make debugging easier and faster.
name: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs.
Currently unused, but in the future, we could use this information to make the error message clearer
by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax.
"""
self.assertEqual(type(name), str)
if attributes is not None:
self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
# Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
if isinstance(fx_outputs, ModelOutput):
self.assertTrue(
isinstance(pt_outputs, ModelOutput),
f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is",
)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch")
# convert to the case of `tuple`
# appending each key to the current (string) `name`
attributes = tuple([f"{name}.{k}" for k in fx_keys])
self.check_pt_flax_outputs(
fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
)
# Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
elif type(fx_outputs) in [tuple, list]:
self.assertEqual(
type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch"
)
self.assertEqual(
len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch"
)
if attributes is not None:
# case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
self.assertEqual(
len(attributes),
len(fx_outputs),
f"{name}: The tuple `attributes` should have the same length as `fx_outputs`",
)
else:
# case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))])
for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes):
self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr)
elif isinstance(fx_outputs, jnp.ndarray):
self.assertTrue(
isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `fx_outputs` is"
)
# Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`.
fx_outputs = np.array(fx_outputs)
pt_outputs = pt_outputs.detach().to("cpu").numpy()
self.assertEqual(
fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch"
)
# deal with NumPy's scalars to make replacing nan values by 0 work.
if np.isscalar(fx_outputs):
fx_outputs = np.array([fx_outputs])
pt_outputs = np.array([pt_outputs])
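# Zero out NaNs at the corresponding positions of both outputs so they do not dominate the max-diff comparison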
fx_nans = np.isnan(fx_outputs)
pt_nans = np.isnan(pt_outputs)
pt_outputs[fx_nans] = 0
fx_outputs[fx_nans] = 0
pt_outputs[pt_nans] = 0
fx_outputs[pt_nans] = 0
max_diff = np.amax(np.abs(fx_outputs - pt_outputs))
self.assertLessEqual(
max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})."
)
else:
raise ValueError(
"`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got"
f" {type(fx_outputs)} instead."
)
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
# It might be better to put this inside the for loop below (because we modify the config there).
# But logically, it is fine.
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
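# Mirror the Flax/NumPy inputs as PyTorch tensors on the torch device for the cross-framework comparison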
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
# send pytorch model to the correct device
pt_model.to(torch_device)
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs)
fx_outputs = fx_model(**prepared_inputs_dict)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
# send pytorch model to the correct device
pt_model_loaded.to(torch_device)
pt_model_loaded.eval()
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs)
fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])
pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None])
self.assertEqual(fx_keys, pt_keys)
self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class)
def test_from_pretrained_save_pretrained(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs_dict).to_tuple()
# verify that normal save_pretrained works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
# verify that save_pretrained for distributed training
# with `params=params` works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=model.params)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
def test_save_load_from_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = get_params(model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname)
base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix)
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_save_load_to_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname)
base_params = get_params(base_model.params)
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_from_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = get_params(model.params)
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
# save pt model
pt_model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname, from_pt=True)
base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix)
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix)
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = get_params(base_model.params)
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_bf16_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
model.params = model.to_bf16(model.params)
base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix)
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = get_params(base_model.params)
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
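# Wrap the model call in a function so it can be jitted and compared against an eager run with jit disabled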
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_naming_convention(self):
for model_class in self.all_model_classes:
model_class_name = model_class.__name__
module_class_name = (
model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module"
)
bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name])
module_cls = getattr(bert_modeling_flax_module, module_class_name)
self.assertIsNotNone(module_cls)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not output attentions")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
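# Remember the number of outputs so we can later check that requesting hidden states adds exactly the expected number of entries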
if self.is_encoder_decoder:
correct_outlen = 5
# Question Answering model returns start_logits and end_logits
if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_load_with_mismatched_shapes(self):
if not self.test_mismatched_shapes:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
with self.assertRaises(ValueError):
new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10)
logger = logging.get_logger("transformers.modeling_flax_utils")
with CaptureLogger(logger) as cl:
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs_dict)["logits"]
self.assertEqual(logits.shape[1], 42)
with CaptureLogger(logger) as cl:
new_model_without_prefix = FlaxAutoModel.from_pretrained(
tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
input_ids = ids_tensor((2, 8), 10)
if self.is_encoder_decoder:
new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
else:
new_model_without_prefix(input_ids)
def test_default_params_dtype(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# check if all params are still in float32 when dtype of computation is half-precision
model = model_class(config, dtype=jnp.float16)
types = jax.tree_util.tree_map(lambda x: x.dtype, model.params)
types = flatten_dict(types)
for name, type_ in types.items():
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.")
def test_to_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to bf16
params = model.to_bf16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_bf16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in bf16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_to_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16
params = model.to_fp16(model.params)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
params = model.to_fp16(model.params, mask)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
# test if all params are in fp16 except key
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.")
else:
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_to_fp32(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# cast all params to fp16 and back to fp32
params = model.to_fp16(model.params)
params = model.to_fp32(params)
# test if all params are in fp32
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
# test masking
flat_params = flatten_dict(params)
key = random.choice(list(flat_params.keys())) # choose a random param
mask = {path: path != key for path in flat_params} # don't cast the key
mask = unflatten_dict(mask)
# cast to fp16 and back to fp32 with mask
params = model.to_fp16(model.params)
params = model.to_fp32(params, mask)
# test if all params are in fp32 except key
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params))
for name, type_ in types.items():
if name == key:
self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.")
else:
self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.")
def test_save_load_in_fp16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to fp16 and save
params = model.to_fp16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
# load the weights again and check if they are still in fp16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.")
def test_save_load_in_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# convert weights to bf16 and save
params = model.to_bf16(model.params)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
# load the weights again and check if they are still in bf16
model = model_class.from_pretrained(tmpdirname)
types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params))
for name, type_ in types.items():
self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.")
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "__call__"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(model_class.main_input_name, observed_main_input_name)
def test_headmasking(self):
if not self.test_head_masking:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
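# First layer: mask only the first head; last layer: keep only the last head; every other layer: keep all heads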
if i == 0:
return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)])
if i == num_hidden_layers - 1:
return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)])
return np.ones(attention_heads, dtype=jnp.int32)
for model_class in self.all_model_classes:
model = model_class(config)
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
# Prepare head mask
inputs["head_mask"] = np.stack(
[
_prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
]
)
outputs = model(**inputs)
def _check_attentions_validity(attentions):
# Remove NaN
for t in attentions:
# Check we don't have more than 25% nans (arbitrary)
self.assertLess(np.isnan(t).sum(), t.size / 4)
attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions]
self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0)
self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0)
if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module
self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0)
self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0)
self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0)
if model.config.is_encoder_decoder:
raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.")
else:
_check_attentions_validity(outputs.attentions)
def test_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
model = model_class(config, _do_init=False)
# Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
# Check that params can be properly initialized when calling init_weights
params = model.init_weights(model.key, model.input_shape)
self.assertIsInstance(params, FrozenDict)
# Check if all required params are initialized
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
# Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if we can do a forward pass
inputs_dict["output_hidden_states"] = True
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
model(**inputs, params=params)
def test_from_pretrained_with_no_automatic_init(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
def _assert_all_params_initialised(model, params):
# Check if all required params are loaded
keys = set(flatten_dict(unfreeze(params)).keys())
self.assertTrue(all(k in keys for k in model.required_params))
# Check if the shapes match
flat_params = flatten_dict(unfreeze(params))
for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items():
self.assertEqual(
v.shape,
flat_params[k].shape,
"Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape),
)
for model_class in self.all_model_classes:
# init the model
model = model_class(config)
# save the model in the temporary directory
# load the saved model with _do_init=False
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
# Check that accessing params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
params = model.params
# Check if all required params are loaded
_assert_all_params_initialised(model, params)
# Check that setting params raises a ValueError when _do_init is False
with self.assertRaises(ValueError):
model.params = params
# Check if init_weights initializes missing keys from from_pretrained
flat_params = flatten_dict(unfreeze(params))
random_key = random.choice(list(flat_params.keys()))
flat_params.pop(random_key)
params = freeze(unflatten_dict(flat_params))
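# Saving with one parameter removed makes it show up as a missing key on reload, forcing init_weights to re-initialize it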
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=params)
model, params = model_class.from_pretrained(tmpdirname, _do_init=False)
params = model.init_weights(model.key, model.input_shape, params=params)
# Check if all required params are loaded
_assert_all_params_initialised(model, params)
def test_checkpoint_sharding_from_hub(self):
model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded")
# the model above is the same as the model below, just a sharded version.
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()):
assert np.allclose(np.array(p1), np.array(p2))
def test_checkpoint_sharding_local(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
with tempfile.TemporaryDirectory() as tmp_dir:
# We use the same folder for various sizes to make sure a new save erases the old checkpoint.
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]:
model.save_pretrained(tmp_dir, max_shard_size=max_size)
# Get each shard file and its size
shard_to_size = {}
for shard in os.listdir(tmp_dir):
if shard.endswith(".msgpack"):
shard_file = os.path.join(tmp_dir, shard)
shard_to_size[shard_file] = os.path.getsize(shard_file)
index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME)
# Check there is an index but no regular weight file
self.assertTrue(os.path.isfile(index_file))
self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME)))
# Check a file is bigger than max_size only when it has a single weight
for shard_file, size in shard_to_size.items():
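# "kiB" is a binary unit (2**10 bytes per kiB), "kB" a decimal one (10**3 bytes per kB)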
if max_size.endswith("kiB"):
max_size_int = int(max_size[:-3]) * 2**10
else:
max_size_int = int(max_size[:-2]) * 10**3
# Note: serialization adds some overhead, so the file can end up slightly larger than
# the size asked for (the shard size is computed from the parameters themselves)
if size >= max_size_int + 50000:
with open(shard_file, "rb") as state_f:
state_file = from_bytes(FlaxBertModel, state_f.read())
self.assertEqual(len(state_file), 1)
# Check the index and the shard files found match
with open(index_file, "r", encoding="utf-8") as f:
index = json.loads(f.read())
all_shards = set(index["weight_map"].values())
shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")}
self.assertSetEqual(all_shards, shards_found)
# Finally, check the model can be reloaded
new_model = FlaxBertModel.from_pretrained(tmp_dir)
for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()):
self.assertTrue(np.allclose(np.array(p1), np.array(p2)))
@is_pt_flax_cross_test
def test_from_sharded_pt(self):
model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True)
ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only")
for key, ref_val in flatten_dict(ref_model.params).items():
val = flatten_dict(model.params)[key]
assert np.allclose(np.array(val), np.array(ref_val))
def test_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
remat_model = model_class(config)
try:
remat_model.enable_gradient_checkpointing()
except NotImplementedError:
continue
outputs = model(**prepared_inputs_dict)
remat_outputs = remat_model(**prepared_inputs_dict)
# ensure that the dicts of outputs contain the same keys
self.assertEqual(outputs.keys(), remat_outputs.keys())
outputs = outputs.to_tuple()
remat_outputs = remat_outputs.to_tuple()
# ensure that the outputs remain precisely equal
for output, remat_output in zip(outputs, remat_outputs):
self.assertTrue((output == remat_output).all())
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_feature_extraction_utils.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# This check we did call the fake head request
mock_head.assert_called()
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_ = Wav2Vec2FeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
)
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-feature-extractor")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
except HTTPError:
pass
def test_push_to_hub(self):
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub("test-feature-extractor", token=self._token)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
# Reset repo
delete_repo(token=self._token, repo_id="test-feature-extractor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, token=self._token
)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_in_organization(self):
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub("valid_org/test-feature-extractor", token=self._token)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, token=self._token
)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_dynamic_feature_extractor(self):
CustomFeatureExtractor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub("test-dynamic-feature-extractor", token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,
{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
)
new_feature_extractor = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_tokenization_utils.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def test_cached_files_are_used_when_internet_is_down_missing_files(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = GPT2TokenizerFast.from_pretrained("gpt2")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = GPT2TokenizerFast.from_pretrained("gpt2")
# This check ensures the fake head request was indeed called
mock_head.assert_called()
def test_legacy_load_from_one_file(self):
# This test is for deprecated behavior and can be removed in v5
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
_ = AlbertTokenizer.from_pretrained(tmp_file)
finally:
os.remove(tmp_file)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json"):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb") as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
# The tiny random BERT has a vocab size of 1024, tiny GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1000)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json")
def test_legacy_load_from_url(self):
# This test is for deprecated behavior and can be removed in v5
_ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-tokenizer")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
except HTTPError:
pass
def test_push_to_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = BertTokenizer(vocab_file)
tokenizer.push_to_hub("test-tokenizer", token=self._token)
new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, token=self._token)
new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
def test_push_to_hub_in_organization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = BertTokenizer(vocab_file)
tokenizer.push_to_hub("valid_org/test-tokenizer-org", token=self._token)
new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, token=self._token
)
new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
def test_push_to_hub_dynamic_tokenizer(self):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = CustomTokenizer(vocab_file)
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token)
tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
# Can't make an isinstance check because the tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
bert_tokenizer.save_pretrained(tmp_dir)
tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
tokenizer.push_to_hub("test-dynamic-tokenizer", token=self._token)
tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
# Can't make an isinstance check because the tokenizer is from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
tokenizer = AutoTokenizer.from_pretrained(
f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
)
# Can't make an isinstance check because the tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
def test_trie(self):
trie = Trie()
trie.add("Hello 友達")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
trie.add("Hello")
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
def test_trie_split(self):
trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
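# Illustrative sketch (an addition, not part of the original tests): `Trie.split` only cuts on the tokens that
# were added and keeps the rest of the string intact, preferring the longest registered match, e.g.
#   trie = Trie()
#   trie.add("extra_id_1")
#   trie.add("extra_id_100")
#   trie.split("x extra_id_100 y")  # -> ["x ", "extra_id_100", " y"]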
def test_trie_single(self):
trie = Trie()
trie.add("A")
self.assertEqual(trie.split("ABC"), ["A", "BC"])
self.assertEqual(trie.split("BCA"), ["BC", "A"])
def test_trie_final(self):
trie = Trie()
trie.add("TOKEN]")
trie.add("[SPECIAL_TOKEN]")
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
def test_trie_subtokens(self):
trie = Trie()
trie.add("A")
trie.add("P")
trie.add("[SPECIAL_TOKEN]")
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
def test_trie_suffix_tokens(self):
trie = Trie()
trie.add("AB")
trie.add("B")
trie.add("C")
self.assertEqual(trie.split("ABC"), ["AB", "C"])
def test_trie_skip(self):
trie = Trie()
trie.add("ABC")
trie.add("B")
trie.add("CD")
self.assertEqual(trie.split("ABCD"), ["ABC", "D"])
def test_cut_text_hardening(self):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
trie = Trie()
parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
self.assertEqual(parts, ["AB", "C"])
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_cache_utils.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import set_seed
from transformers.testing_utils import is_torch_available, require_auto_gptq, require_torch, require_torch_gpu, slow
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, LlamaForCausalLM, SinkCache
@require_torch
class CacheTest(unittest.TestCase):
def test_cache_equivalence(self):
"""Tests that we can convert back and forth between the legacy cache format and DynamicCache"""
legacy_cache = ()
new_cache = DynamicCache()
# Creates a new cache with 10 layers in both formats
for layer_idx in range(10):
new_key = torch.rand((2, 4, 8, 16))
new_value = torch.rand((2, 4, 8, 16))
new_cache.update(new_key, new_value, layer_idx)
legacy_cache += ((new_key, new_value),)
# Sanity check 1: they must have the same shapes
self.assertEqual(len(legacy_cache), len(new_cache))
for layer_idx in range(10):
self.assertEqual(len(legacy_cache[layer_idx]), len(new_cache[layer_idx]))
for key_value_idx in range(2):
self.assertTrue(
legacy_cache[layer_idx][key_value_idx].shape == new_cache[layer_idx][key_value_idx].shape
)
# Sanity check 2: we can get the sequence length in multiple ways with DynamicCache, and they return the
# expected value
self.assertTrue(legacy_cache[0][0].shape[-2] == new_cache[0][0].shape[-2] == new_cache.get_seq_length() == 8)
# Sanity check 3: they must be equal, and both support indexing
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(new_cache[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx])
)
# Test 1: We can convert from legacy to new with no changes
from_legacy = DynamicCache.from_legacy_cache(legacy_cache)
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(from_legacy[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx])
)
# Test 2: We can convert from new to legacy with no changes
to_legacy = new_cache.to_legacy_cache()
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(to_legacy[layer_idx][key_value_idx], new_cache[layer_idx][key_value_idx])
)
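# Usage sketch (illustrative only, mirroring what this test exercises): converting between the two cache
# representations is a round trip, e.g.
#   cache = DynamicCache.from_legacy_cache(legacy_cache)  # tuple-of-tuples -> DynamicCache
#   legacy_again = cache.to_legacy_cache()                # DynamicCache -> tuple-of-tuples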
def test_reorder_cache_retrocompatibility(self):
"""Tests that Cache.reorder_cache is retrocompatible with the legacy code path"""
legacy_reorder_fn = LlamaForCausalLM._reorder_cache # An example of a legacy `_reorder_cache` function
legacy_cache = ()
new_cache = DynamicCache()
# Creates a new cache with 10 layers in both formats
for layer_idx in range(10):
new_key = torch.rand((4, 4, 8, 16))
new_value = torch.rand((4, 4, 8, 16))
new_cache.update(new_key, new_value, layer_idx)
legacy_cache += ((new_key, new_value),)
# Let's create some dummy beam indices. From the shape above, it is equivalent to the case where num_beams=4
# and batch_size=1
beam_idx = torch.randint(low=0, high=4, size=(4,))
legacy_cache_reordered = legacy_reorder_fn(legacy_cache, beam_idx)
new_cache.reorder_cache(beam_idx)
# Let's check that the results are the same
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(
new_cache[layer_idx][key_value_idx], legacy_cache_reordered[layer_idx][key_value_idx]
)
)
@require_torch_gpu
@slow
class CacheIntegrationTest(unittest.TestCase):
def test_dynamic_cache_hard(self):
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16
)
inputs = tokenizer(["Here's everything I know about cats. Cats"], return_tensors="pt").to(model.device)
# DynamicCache and the legacy cache format should be equivalent
set_seed(0)
gen_out_legacy = model.generate(**inputs, do_sample=True, max_new_tokens=256)
set_seed(0)
gen_out = model.generate(**inputs, do_sample=True, max_new_tokens=256, past_key_values=DynamicCache())
self.assertListEqual(gen_out_legacy.tolist(), gen_out.tolist())
decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
expected_text = (
"Here's everything I know about cats. Cats are mysterious creatures. They can't talk, and they don't like "
"to be held. They don't play fetch, and they don't like to be hugged. But they do like to be petted.\n"
"Cats are also very independent. They don't like to be told what to do, and they don't like to be told "
"what to eat. They are also very territorial. They don't like to share their food or their toys.\nCats "
"are also very curious. They like to explore, and they like to play. They are also very fast. They can "
"run very fast, and they can jump very high.\nCats are also very smart. They can learn tricks, and they "
"can solve problems. They are also very playful. They like to play with toys, and they like to play with "
"other cats.\nCats are also very affectionate. They like to be petted, and they like to be held. They "
"also like to be scratched.\nCats are also very clean. They like to groom themselves, and they like to "
"clean their litter box.\nCats are also very independent. They don't"
)
self.assertEqual(decoded[0], expected_text)
def test_dynamic_cache_batched(self):
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16
)
inputs = tokenizer(["A sequence: 1, 2, 3, 4, 5", "A sequence: A, B, C"], padding=True, return_tensors="pt").to(
model.device
)
gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10, past_key_values=DynamicCache())
decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
expected_text = ["A sequence: 1, 2, 3, 4, 5, 6, 7, 8,", "A sequence: A, B, C, D, E, F, G, H"]
self.assertListEqual(decoded, expected_text)
def test_dynamic_cache_beam_search(self):
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16
)
inputs = tokenizer(["The best color is"], return_tensors="pt").to(model.device)
gen_out = model.generate(
**inputs,
do_sample=False,
max_new_tokens=20,
num_beams=2,
num_return_sequences=2,
)
decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
expected_text = [
"The best color is the one that makes you feel good.\nThe best color is the one that makes you feel good",
"The best color is the one that suits you.\nThe best color is the one that suits you. The",
]
self.assertListEqual(decoded, expected_text)
@require_auto_gptq
def test_sink_cache_hard(self):
tokenizer = AutoTokenizer.from_pretrained("TheBloke/LLaMa-7B-GPTQ")
model = AutoModelForCausalLM.from_pretrained("TheBloke/LLaMa-7B-GPTQ", device_map="auto")
inputs = tokenizer(["Vaswani et al. (2017) introduced the Transformers"], return_tensors="pt").to(model.device)
# Set up the SinkCache. A small window length is used to keep the computational cost down. If this example is run
# without a SinkCache, the last few tokens are gibberish (ends in "of the of the of a of a of")
cache = SinkCache(window_length=508, num_sink_tokens=4)
gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=3000, past_key_values=cache)
decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True)
self.assertTrue(decoded[0].endswith("to perform a variety of tasks. The Transformer is a neural network"))
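# Note (illustrative, an assumption about the cache layout rather than part of this test): SinkCache keeps the
# first `num_sink_tokens` tokens as attention sinks plus a sliding window of the most recent tokens, so the
# cache stays around `window_length` entries while generation runs past the original context, e.g.
#   cache = SinkCache(window_length=508, num_sink_tokens=4)
#   out = model.generate(**inputs, past_key_values=cache, max_new_tokens=3000)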
def test_sink_cache_iterative_prompts(self):
"""Tests that SinkCache supports more than one new token at once, when shifting the cache"""
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
model = AutoModelForCausalLM.from_pretrained(
"HuggingFaceH4/zephyr-7b-beta", device_map="auto", torch_dtype=torch.float16
)
prompt = (
"Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences "
"and must-see attractions."
)
# Prepare generation settings
cache = SinkCache(window_length=256, num_sink_tokens=4)
input_ids = torch.tensor([], device=model.device, dtype=torch.int)
for _ in range(3):
# Tokenize the prompt with the correct chat template
chat = [{"role": "user", "content": prompt}]
tokenized_chat = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True).to(
model.device
)
input_ids = torch.cat((input_ids, tokenized_chat), dim=1)
# Perform the generation
gen_out = model.generate(
input_ids, do_sample=False, max_new_tokens=100, past_key_values=cache, use_cache=True
)
input_ids = gen_out
# We went well beyond the cache length
self.assertTrue(input_ids.shape[1] > cache.get_max_length() * 1.5)
# And it still produces coherent English
decoded = tokenizer.batch_decode(input_ids, skip_special_tokens=True)
last_output = (
"<|assistant|>\nAs the sun began to set over the Pacific Ocean, I found myself standing on the shores of "
"Waikiki Beach, my heart filled with awe and wonder. I had just returned from a two-week journey to the "
"beautiful island of Hawaii, and it had been an unforgettable experience filled with cultural experiences "
"and must-see attractions that left me breathless.\n\nOne of the most memorable experiences of my trip "
"was visiting the historic district of Honolulu. Here,"
)
self.assertTrue(decoded[0].endswith(last_output))
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_sequence_feature_extraction_common.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
# to overwrite in feature-extractor-specific tests
feat_extract_tester = None
feature_extraction_class = None
@property
def feat_extract_dict(self):
return self.feat_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_common_properties(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feat_extract, "feature_size"))
self.assertTrue(hasattr(feat_extract, "sampling_rate"))
self.assertTrue(hasattr(feat_extract, "padding_value"))
def test_batch_feature(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_torch
def test_batch_feature_pt(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_tf
def test_batch_feature_tf(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
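# Illustrative sketch (not part of the original tests): `BatchFeature` converts the same raw inputs to the
# requested framework via `tensor_type`, e.g.
#   feats_np = BatchFeature({input_name: speech_inputs}, tensor_type="np")  # NumPy arrays
#   feats_pt = BatchFeature({input_name: speech_inputs}, tensor_type="pt")  # PyTorch tensors
#   feats_tf = BatchFeature({input_name: speech_inputs}, tensor_type="tf")  # TensorFlow tensors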
def _check_padding(self, numpify=False):
def _inputs_have_equal_length(input):
length = len(input[0])
for input_slice in input[1:]:
if len(input_slice) != length:
return False
return True
def _inputs_are_equal(input_1, input_2):
if len(input_1) != len(input_2):
return False
for input_slice_1, input_slice_2 in zip(input_1, input_2):
if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
pad_diff = self.feat_extract_tester.seq_length_diff
pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
pad_min_length = self.feat_extract_tester.min_seq_length
batch_size = self.feat_extract_tester.batch_size
feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
input_1 = feat_extract.pad(processed_features, padding=False)
input_1 = input_1[input_name]
input_2 = feat_extract.pad(processed_features, padding="longest")
input_2 = input_2[input_name]
input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
input_3 = input_3[input_name]
input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
input_4 = input_4[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="max_length")[input_name]
input_5 = feat_extract.pad(
processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
)
input_5 = input_5[input_name]
self.assertFalse(_inputs_have_equal_length(input_1))
self.assertTrue(_inputs_have_equal_length(input_2))
self.assertTrue(_inputs_have_equal_length(input_3))
self.assertTrue(_inputs_are_equal(input_2, input_3))
self.assertTrue(len(input_1[0]) == pad_min_length)
self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
input_6 = input_6[input_name]
input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
input_7 = input_7[input_name]
input_8 = feat_extract.pad(
processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
)
input_8 = input_8[input_name]
input_9 = feat_extract.pad(
processed_features,
padding="max_length",
pad_to_multiple_of=10,
max_length=pad_max_length,
return_tensors="np",
)
input_9 = input_9[input_name]
self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
self.assertTrue(_inputs_are_equal(input_6, input_7))
expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_9.shape[2] == feature_size)
# Check padding value is correct
padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
)
self.assertTrue(
abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3
)
def _check_truncation(self, numpify=False):
def _inputs_have_equal_length(input):
length = len(input[0])
for input_slice in input[1:]:
if len(input_slice) != length:
return False
return True
def _inputs_are_equal(input_1, input_2):
if len(input_1) != len(input_2):
return False
for input_slice_1, input_slice_2 in zip(input_1, input_2):
if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
input_1 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
)
input_1 = input_1[input_name]
input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
input_2 = input_2[input_name]
self.assertTrue(_inputs_have_equal_length(input_1))
self.assertFalse(_inputs_have_equal_length(input_2))
# truncate to smallest with np
input_3 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
return_tensors="np",
truncation=True,
)
input_3 = input_3[input_name]
input_4 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
)
input_4 = input_4[input_name]
self.assertTrue(_inputs_have_equal_length(input_3))
self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
# without truncation, padding to a max_length smaller than the longest input leaves sequences of unequal
# length, so the function can't return `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(input_4))
# truncate to middle
input_5 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[1]),
truncation=True,
return_tensors="np",
)
input_5 = input_5[input_name]
input_6 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
)
input_6 = input_6[input_name]
input_7 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
)
input_7 = input_7[input_name]
self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(input_5))
self.assertTrue(_inputs_have_equal_length(input_6))
self.assertTrue(_inputs_are_equal(input_5, input_6))
# without truncation, padding to a max_length smaller than the longest input leaves sequences of unequal
# length, so the function can't return `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(input_7))
self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, truncation=True)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
pad_to_multiple_of = 12
input_8 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
pad_to_multiple_of=pad_to_multiple_of,
truncation=True,
)
input_8 = input_8[input_name]
input_9 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
pad_to_multiple_of=pad_to_multiple_of,
)
input_9 = input_9[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
expected_length = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_8[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(input_8))
self.assertFalse(_inputs_have_equal_length(input_9))
def test_padding_from_list(self):
self._check_padding(numpify=False)
def test_padding_from_array(self):
self._check_padding(numpify=True)
def test_truncation_from_list(self):
self._check_truncation(numpify=False)
def test_truncation_from_array(self):
self._check_truncation(numpify=True)
@require_torch
def test_padding_accepts_tensors_pt(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
def test_padding_accepts_tensors_tf(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
def test_attention_mask(self):
feat_dict = self.feat_extract_dict
feat_dict["return_attention_mask"] = True
feat_extract = self.feature_extraction_class(**feat_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_lengths = [len(x) for x in speech_inputs]
input_name = feat_extract.model_input_names[0]
processed = BatchFeature({input_name: speech_inputs})
processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
self.assertIn("attention_mask", processed)
self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
def test_attention_mask_with_truncation(self):
feat_dict = self.feat_extract_dict
feat_dict["return_attention_mask"] = True
feat_extract = self.feature_extraction_class(**feat_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_lengths = [len(x) for x in speech_inputs]
input_name = feat_extract.model_input_names[0]
processed = BatchFeature({input_name: speech_inputs})
max_length = min(input_lengths)
processed_pad = feat_extract.pad(
processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
)
self.assertIn("attention_mask", processed_pad)
self.assertListEqual(
list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
)
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
)
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_modeling_tf_common.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import inspect
import json
import os
import random
import tempfile
import unittest
from importlib import import_module
from math import isnan
from typing import List, Tuple
from datasets import Dataset
from transformers import is_tf_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import ( # noqa: F401
CaptureLogger,
_tf_gpu_memory_limit,
is_pt_tf_cross_test,
require_tf,
require_tf2onnx,
slow,
torch_device,
)
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging
from transformers.utils.generic import ModelOutput
logger = logging.get_logger(__name__)
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFAutoModel,
TFAutoModelForSequenceClassification,
TFSharedEmbeddings,
)
from transformers.generation import (
TFBeamSampleDecoderOnlyOutput,
TFBeamSampleEncoderDecoderOutput,
TFBeamSearchDecoderOnlyOutput,
TFBeamSearchEncoderDecoderOutput,
TFGreedySearchDecoderOnlyOutput,
TFGreedySearchEncoderDecoderOutput,
TFSampleDecoderOnlyOutput,
TFSampleEncoderDecoderOutput,
)
tf.config.experimental.enable_tensor_float_32_execution(False)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
if is_torch_available():
import torch
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_mismatched_shapes = True
test_resize_embeddings = True
test_head_masking = True
is_encoder_decoder = False
has_attentions = True
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING),
*get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING),
]:
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
]:
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
*get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING),
] and "labels" in dict(inspect.signature(model_class.call).parameters):
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING):
num_patches = self.model_tester.image_size // self.model_tester.patch_size
inputs_dict["bool_masked_pos"] = tf.zeros(
(self.model_tester.batch_size, num_patches**2), dtype=tf.int32
)
elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING):
batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32)
elif model_class.__name__.endswith("ForCTC"):
# When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_save_load_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
model_config = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(model_config)
new_model = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
_ = model_class.from_config(model.config)
_ = new_model(self._prepare_for_class(inputs_dict, model_class)) # Build model
new_model.set_weights(model.get_weights())
after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
@slow
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
self.assertTrue(os.path.exists(saved_model_dir))
def test_prepare_serving_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = self.has_attentions
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(inputs)
serving_outputs = model.serving_output(outputs)
for k, v in serving_outputs.items():
# Check that we have one of three possible outputs: None, tuple of tensors or a tensor
if isinstance(v, tuple):
self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v))
elif v is not None:
self.assertIsInstance(v, tf.Tensor)
else:
self.assertIsNone(v)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else [])
expected_arg_names.extend(
["head_mask", "decoder_head_mask"] if "head_mask" and "decoder_head_mask" in arg_names else []
)
expected_arg_names.extend(
["cross_attn_head_mask", "encoder_outputs"]
if "cross_attn_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_onnx_compliancy(self):
if not self.test_onnx:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"ReadVariableOp",
"ResourceGather",
"TruncatedNormal",
"VarHandleOp",
"VarIsInitializedOp",
]
onnx_ops = []
with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
onnx_opsets = json.load(f)["opsets"]
for i in range(1, self.onnx_min_opset + 1):
onnx_ops.extend(onnx_opsets[str(i)])
for model_class in self.all_model_classes:
model_op_names = set()
with tf.Graph().as_default() as g:
model = model_class(config)
model.build()
for op in g.get_operations():
model_op_names.add(op.node_def.op)
model_op_names = sorted(model_op_names)
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op)
self.assertEqual(len(incompatible_ops), 0, incompatible_ops)
# `tf2onnx` issue page: https://github.com/onnx/tensorflow-onnx/issues/2172
# TODO: undo skip once a fix is done in `tf2onnx`
@unittest.skip("`tf2onnx` broke with TF 2.13")
@require_tf2onnx
@slow
def test_onnx_runtime_optimize(self):
if not self.test_onnx:
return
import onnxruntime
import tf2onnx
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
model = model_class(config)
model.build()
onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset)
onnxruntime.InferenceSession(onnx_model_proto.SerializeToString())
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
}
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
# Don't copy this method to model specific test file!
# TODO: remove this method once the issues are all fixed!
def _make_attention_mask_non_null(self, inputs_dict):
"""Make sure no sequence has all zeros as attention mask"""
for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]:
if k in inputs_dict:
attention_mask = inputs_dict[k]
# Make sure no all 0s attention masks - to avoid failure at this moment.
# Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
# TODO: remove this line once a fix regarding large negative values for attention mask is done.
attention_mask = tf.concat(
[tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
)
# Here we make the first sequence with all 0s as attention mask.
# Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative
# values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks.
# TODO: enable this block once the large negative values thing is cleaned up.
# (see https://github.com/huggingface/transformers/issues/14859)
# attention_mask = tf.concat(
# [
# tf.zeros_like(attention_mask[:1], dtype=tf.int32),
# tf.cast(attention_mask[1:], dtype=tf.int32)
# ],
# axis=0
# )
inputs_dict[k] = attention_mask
# Don't copy this method to model specific test file!
# TODO: remove this method once the issues are all fixed!
def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
"""For temporarily ignoring some failed test cases (issues to be fixed)"""
tf_keys = {k for k, v in tf_outputs.items() if v is not None}
pt_keys = {k for k, v in pt_outputs.items() if v is not None}
key_differences = tf_keys.symmetric_difference(pt_keys)
if model_class.__name__ in [
"TFFlaubertWithLMHeadModel",
"TFFunnelForPreTraining",
"TFElectraForPreTraining",
"TFXLMWithLMHeadModel",
]:
for k in key_differences:
if k in ["loss", "losses"]:
tf_keys.discard(k)
pt_keys.discard(k)
elif model_class.__name__.startswith("TFGPT2"):
# `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
tf_keys.discard("past_key_values")
pt_keys.discard("past_key_values")
# create new outputs from the remaining fields
new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})
return new_tf_outputs, new_pt_outputs
def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
"""Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
Args:
model_class: The class of the model that is currently being tested. For example, `TFBertModel`,
`TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
error messages.
name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
being a named field in the output.
"""
self.assertEqual(type(name), str)
if attributes is not None:
self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
# Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
if isinstance(tf_outputs, ModelOutput):
self.assertTrue(
isinstance(pt_outputs, ModelOutput),
f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is",
)
# Don't copy this block to model specific test file!
# TODO: remove this method and this line after issues are fixed
tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)
tf_keys = [k for k, v in tf_outputs.items() if v is not None]
pt_keys = [k for k, v in pt_outputs.items() if v is not None]
self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
# convert to the case of `tuple`
# appending each key to the current (string) `names`
attributes = tuple([f"{name}.{k}" for k in tf_keys])
self.check_pt_tf_outputs(
tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
)
# Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
elif type(tf_outputs) in [tuple, list]:
self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
if attributes is not None:
# case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
self.assertEqual(
len(attributes),
len(tf_outputs),
f"{name}: The tuple `names` should have the same length as `tf_outputs`",
)
else:
# case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
elif isinstance(tf_outputs, tf.Tensor):
self.assertTrue(
isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is"
)
tf_outputs = tf_outputs.numpy()
pt_outputs = pt_outputs.detach().to("cpu").numpy()
self.assertEqual(
tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
)
# deal with NumPy's scalars to make replacing nan values by 0 work.
if np.isscalar(tf_outputs):
tf_outputs = np.array([tf_outputs])
pt_outputs = np.array([pt_outputs])
tf_nans = np.isnan(tf_outputs)
pt_nans = np.isnan(pt_outputs)
pt_outputs[tf_nans] = 0
tf_outputs[tf_nans] = 0
pt_outputs[pt_nans] = 0
tf_outputs[pt_nans] = 0
max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
else:
raise ValueError(
"`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got"
f" {type(tf_outputs)} instead."
)
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict = {}
for name, key in tf_inputs_dict.items():
if isinstance(key, bool):
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "pixel_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "input_features":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
# other general float inputs
elif tf_inputs_dict[name].dtype.is_floating:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
return pt_inputs_dict
def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict)
# send pytorch inputs to the correct device
pt_inputs_dict = {
k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
}
# send pytorch model to the correct device
pt_model.to(torch_device)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs_dict)
tf_outputs = tf_model(tf_inputs_dict)
# TF models' returned loss is usually a tensor rather than a scalar.
# (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`)
# Change it here to a scalar to match PyTorch models' loss
tf_loss = getattr(tf_outputs, "loss", None)
if tf_loss is not None:
tf_outputs.loss = tf.math.reduce_mean(tf_loss)
self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model))
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
import transformers
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
# of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
# TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
self._make_attention_mask_non_null(inputs_dict)
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
tf_model = model_class(config)
pt_model = pt_model_class(config)
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
tf_inputs_dict_with_labels = self._prepare_for_class(
inputs_dict,
model_class,
# Not all models accept "labels" in the forward pass (yet :) )
return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False,
)
# For some models (e.g. base models), there is no label returned.
# Set the input dict to `None` to avoid checking the outputs twice for the same input dict.
if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()):
tf_inputs_dict_with_labels = None
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
)
pt_model = transformers.load_tf2_model_in_pytorch_model(
pt_model, tf_model, allow_missing_keys=allow_missing_keys
)
# Original test: check without `labels`
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
# check with `labels`
if tf_inputs_dict_with_labels:
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
)
# Original test: check without `labels`
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
# check with `labels`
if tf_inputs_dict_with_labels:
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
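    # Note on `test_pt_tf_model_equivalence` above: equivalence is checked twice, once after cross-loading
    # weights directly between the in-memory TF and PT models, and once after round-tripping them through
    # on-disk checkpoints (`pt_model.bin` / `tf_model.h5`), comparing outputs via `check_pt_tf_models`.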
@slow
def test_compile_tf_model(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
# Prepare our model
model = model_class(config)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
functional_inputs = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
outputs_dict = model(functional_inputs)
hidden_states = outputs_dict[0]
# Compile extended model
functional_model = tf.keras.Model(inputs=functional_inputs, outputs=hidden_states)
model_out = functional_model.predict(model.dummy_inputs) # Check we can pass inputs with the Keras API
self.assertTrue(model_out is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
functional_model.save(tmpdirname) # Ensure we can save/export the whole functional model
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
outputs_keywords = model(**inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not output attentions")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(min(out_len % 2, out_len % 5), 0) # differentiation due to newly added cross_attentions
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
def test_headmasking(self):
if not self.test_head_masking:
return
random.Random().seed(42)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
random.Random().seed()
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
# Prepare head_mask
def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
if i == 0:
return tf.concat(
(tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
)
elif i == num_hidden_layers - 1:
return tf.concat(
(tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
)
else:
return tf.ones(attention_heads, dtype=tf.float32)
head_mask = tf.stack(
[
prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
],
0,
)
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
inputs["head_mask"] = head_mask
if model.config.is_encoder_decoder:
signature = inspect.signature(model.call)
arg_names = [*signature.parameters.keys()]
if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model
inputs["decoder_head_mask"] = head_mask
if "cross_attn_head_mask" in arg_names:
inputs["cross_attn_head_mask"] = head_mask
outputs = model(**inputs, return_dict=True)
def check_attentions_validity(attentions):
# Remove Nan
for t in attentions:
self.assertLess(
(tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
) # Check we don't have more than 25% nans (arbitrary)
attentions = [
tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
] # remove them (the test is less complete)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)
if model.config.is_encoder_decoder:
check_attentions_validity(outputs.encoder_attentions)
check_attentions_validity(outputs.decoder_attentions)
if "cross_attn_head_mask" in arg_names:
check_attentions_validity(outputs.cross_attentions)
else:
check_attentions_validity(outputs.attentions)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
text_in_text_out_models = (
get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
+ get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
+ get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
)
speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer)
legacy_text_in_text_out = model.get_lm_head() is not None
if model_class in text_in_text_out_models or legacy_text_in_text_out:
out_embeddings = model.get_output_embeddings()
self.assertIsInstance(out_embeddings, tf.keras.layers.Layer)
bias = model.get_bias()
if bias is not None:
self.assertIsInstance(bias, dict)
for _, v in bias.items():
self.assertIsInstance(v, tf.Variable)
elif model_class in speech_in_text_out_models:
out_embeddings = model.get_output_embeddings()
self.assertIsInstance(out_embeddings, tf.keras.layers.Layer)
bias = model.get_bias()
self.assertIsNone(bias)
else:
out_embeddings = model.get_output_embeddings()
assert out_embeddings is None
bias = model.get_bias()
self.assertIsNone(bias)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
if self.has_attentions:
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
# Not all models accept "labels" in the forward pass (yet :) )
if "labels" in inspect.signature(model.call).parameters.keys():
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
if self.has_attentions:
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(inputs_dict)
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)
inputs = self._prepare_for_class(inputs, model_class)
model(inputs)
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
                    inputs_np_dict[k] = np.array(v)
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
output_for_dict_input = model(inputs_np)
output_for_kw_input = model(**inputs_np)
self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
def test_valid_input_signature_and_dummies(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
call_args = inspect.signature(model.call).parameters
for key in model.input_signature:
self.assertIn(key, call_args)
for key in model.dummy_inputs:
self.assertIn(key, call_args)
def test_resize_token_embeddings(self):
# TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on
# tf.keras.layers.Embedding
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if isinstance(embedding_layer, tf.keras.layers.Embedding):
# builds the embeddings layer
model.build()
return embedding_layer.embeddings
else:
return model._get_word_embedding_weight(embedding_layer)
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config`
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[-1], assert_size)
models_equal = True
for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
# TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs,
# while passing push CI. Fix the underlying issues and remove the tag.
@slow
def test_save_load_after_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # create a model with resized (expanded) embeddings
new_tokens_size = 10
old_total_size = config.vocab_size
new_total_size = old_total_size + new_tokens_size
model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config`
model.build()
model.resize_token_embeddings(new_total_size)
# fetch the output for an input exclusively made of new members of the vocabulary
inputs_dict = copy.deepcopy(original_inputs_dict)
ids_feat_name = None
if "input_ids" in inputs_dict:
ids_feat_name = "input_ids"
elif "decoder_input_ids" in inputs_dict:
ids_feat_name = "decoder_input_ids"
else:
assert False, "No input ids feature found in the inputs dict"
new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size)
new_vocab_input_ids += old_total_size
inputs_dict[ids_feat_name] = new_vocab_input_ids
if "input_ids" in inputs_dict:
inputs_dict["input_ids"] = new_vocab_input_ids
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"] = new_vocab_input_ids
prepared_inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs)
# save and load the model
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
restored_model_outputs = model(**prepared_inputs)
# check that the output for the restored model is the same
self.assert_outputs_same(restored_model_outputs, outputs)
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="This test always passes on CPU.",
)
def test_embeddings_out_of_bounds_raise_exception(self):
# TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it.
# This test should only fail on GPU for models where we haven't added the safety check.
if not self.test_resize_embeddings:
return
config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
inputs_dict = copy.deepcopy(original_inputs_dict)
if "input_ids" in inputs_dict:
inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9)
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9)
prepared_inputs = self._prepare_for_class(inputs_dict, model_class)
with self.assertRaises(tf.errors.InvalidArgumentError):
model(**prepared_inputs)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined model needs input_ids
with self.assertRaises(ValueError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]:
# Models with non-text inputs won't work here; num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(ValueError):
# generating multiple sequences when no beam search generation
# is not allowed as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_no_beam_search_generate_dict_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
if input_ids is None:
input_ids = inputs_dict.get("input_features", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
output_greedy = model.generate(
input_ids,
do_sample=False,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
output_sample = model.generate(
input_ids,
do_sample=True,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput)
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined model needs input_ids, num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(ValueError):
                # generating more return sequences than the number of beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_beam_search_generate_dict_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
if input_ids is None:
input_ids = inputs_dict.get("input_features", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
output_beam_search = model.generate(
input_ids,
num_beams=2,
do_sample=False,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
output_beam_sample = model.generate(
input_ids,
num_beams=2,
do_sample=True,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput)
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)
if not added_label_names:
continue # This test is only for models with easily-separable labels
added_label = prepared_for_class[added_label_names[0]]
expected_loss_size = added_label.shape.as_list()[:1]
            # Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"}
input_name = possible_input_names.intersection(set(prepared_for_class)).pop()
model_input = prepared_for_class.pop(input_name)
outputs = model(model_input, **prepared_for_class)
if not isinstance(outputs, ModelOutput) or not hasattr(outputs, "loss"):
continue
loss = outputs.loss
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
            # Test that the model correctly computes the loss when we mask some positions
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"}
input_name = possible_input_names.intersection(set(prepared_for_class)).pop()
model_input = prepared_for_class.pop(input_name)
if "labels" in prepared_for_class:
labels = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
labels[0] = -100
prepared_for_class["labels"] = tf.convert_to_tensor(labels)
loss = model(model_input, **prepared_for_class)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
            # Test that the model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
            # Test that the model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: input_name}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
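    # Note on `test_loss_computation` above: the loss is exercised through four input styles (labels passed
    # as keyword arguments, labels partially masked with -100, a single input dict, and a positional tuple
    # built from the call signature defaults), and its shape is checked against the label batch dimension.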
def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3):
self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol))
@slow
def test_keras_fit(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
            # Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# We also remove "return_loss" as this is covered by the train_step when using fit()
prepared_for_class = {
key: val
for key, val in prepared_for_class.items()
if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss")
}
if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class:
del prepared_for_class["decoder_input_ids"]
accuracy_classes = [
"ForPreTraining",
"ForCausalLM",
"ForMaskedLM",
"ForQuestionAnswering",
"ForMultipleChoice",
"ForSequenceClassification",
"ForTokenClassification",
"ForNextSentencePrediction",
"LMHeadModel",
]
for accuracy_class in accuracy_classes:
if model.__class__.__name__.endswith(accuracy_class):
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
break
else:
metrics = []
if hasattr(self.model_tester, "batch_size"):
sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32)
else:
sample_weight = None
# Build the model so we can get some constant weights and check outputs
outputs = model(prepared_for_class)
if getattr(outputs, "loss", None) is None:
continue
model_weights = model.get_weights()
# Run eagerly to save some expensive compilation times
model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics)
# Make sure the model fits without crashing regardless of where we pass the labels
history1 = model.fit(
prepared_for_class,
validation_data=prepared_for_class,
sample_weight=sample_weight,
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
)
val_loss1 = history1.history["val_loss"][0]
self.assertTrue(not isnan(val_loss1))
accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")}
possible_label_cols = {
"labels",
"label",
"label_ids",
"start_positions",
"start_position",
"end_positions",
"end_position",
"next_sentence_label",
}
label_names = possible_label_cols.intersection(set(prepared_for_class))
if len(label_names) == 0:
# The next tests only make sense for models with separate inputs and labels, and do not make
# sense for models that don't clearly distinguish between the two (e.g. CLIP)
return
labels = {key: val for key, val in prepared_for_class.items() if key in label_names}
inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names}
self.assertGreater(len(inputs_minus_labels), 0)
# We reinitialize the model here even though our learning rate was zero
# because BatchNorm updates weights by means other than gradient descent.
model.set_weights(model_weights)
history2 = model.fit(
inputs_minus_labels,
labels,
validation_data=(inputs_minus_labels, labels),
sample_weight=sample_weight,
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
)
val_loss2 = history2.history["val_loss"][0]
self.assertTrue(not isnan(val_loss2))
accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")}
self.check_keras_fit_results(val_loss1, val_loss2)
self.assertEqual(history1.history.keys(), history2.history.keys())
for key in history1.history.keys():
if not key.startswith("val_"):
self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!")
if metrics:
self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!")
def test_int_support(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
prepared_for_class = self._prepare_for_class(
inputs_dict.copy(),
model_class,
return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False,
)
if not any(
tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor)
):
return # No integer inputs means no need for this test
prepared_for_class = {
key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor
for key, tensor in prepared_for_class.items()
}
model = model_class(config)
model(**prepared_for_class) # No assertion, we're just checking this doesn't throw an error
int32_prepared_for_class = {
key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor
for key, tensor in prepared_for_class.items()
}
model(**int32_prepared_for_class) # No assertion, we're just checking this doesn't throw an error
# After testing that the model accepts all int inputs, confirm that its dummies are int32
for key, tensor in model.dummy_inputs.items():
self.assertTrue(
isinstance(tensor, tf.Tensor) or tf.keras.backend.is_keras_tensor(tensor),
"Dummy inputs should be tf.Tensor!",
)
if tensor.dtype.is_integer:
self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!")
# Also confirm that the input_signature uses int32
for key, tensor_spec in model.input_signature.items():
if tensor_spec.dtype.is_integer:
self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!")
def test_generate_with_headmasking(self):
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)),
"decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
"cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
}
signature = inspect.signature(model.call)
if set(head_masking.keys()) < {*signature.parameters.keys()}:
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
inputs_dict["input_ids"],
num_beams=1,
max_length=inputs_dict["input_ids"] + 5,
output_attentions=True,
return_dict_in_generate=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0)
def test_load_with_mismatched_shapes(self):
if not self.test_mismatched_shapes:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
_ = model(**inputs)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
with self.assertRaises(ValueError):
new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10)
logger = logging.get_logger("transformers.modeling_tf_utils")
with CaptureLogger(logger) as cl:
new_model = TFAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs).logits
self.assertEqual(logits.shape[1], 42)
with CaptureLogger(logger) as cl:
new_model_without_prefix = TFAutoModel.from_pretrained(
tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
                    # Although TF models always have a prefix pointing to `MainLayer`,
                    # we still add this "without prefix" test to keep consistency between TF and PT tests.
input_ids = ids_tensor((2, 8), 10)
if self.is_encoder_decoder:
new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
else:
new_model_without_prefix(input_ids)
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "call"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(model_class.main_input_name, observed_main_input_name)
def test_dataset_conversion(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False)
if "labels" in tf_inputs_dict:
return # This is some kinda funky decoder model that needs labels in its forward pass
tf_inputs_dict = {
key: val
for key, val in tf_inputs_dict.items()
if "head_mask" not in key and isinstance(val, tf.Tensor)
}
tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor
input_dataset = Dataset.from_dict(tf_inputs_dict)
tf_dataset = model.prepare_tf_dataset(
input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False
)
test_batch = next(iter(tf_dataset))
if isinstance(test_batch, tf.Tensor):
self.assertEqual(len(test_batch), len(input_dataset)) # Assert we didn't lose any data
elif isinstance(test_batch, dict):
# Assert we discarded the unwanted extra column but kept everything else
self.assertEqual(len(test_batch), len(input_dataset.features) - 1)
self.assertNotIn("extra_unwanted_column", test_batch)
for tensor in test_batch.values():
self.assertTrue(isinstance(tensor, tf.Tensor))
self.assertEqual(len(tensor), len(input_dataset)) # Assert we didn't lose any data
model(test_batch, training=False)
if "labels" in inspect.signature(model_class.call).parameters.keys():
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
if "labels" not in tf_inputs_dict:
return # This model isn't giving us labels after all, don't try training with it
tf_inputs_dict = {key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key}
tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor
input_dataset = Dataset.from_dict(tf_inputs_dict)
tf_dataset = model.prepare_tf_dataset(
input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False
)
test_batch, test_batch_labels = next(iter(tf_dataset))
self.assertGreater(len(test_batch_labels), 0) # Assert the labels are present
feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch)
label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels)
# Assert we discarded the unwanted extra column but kept everything else
self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1)
if isinstance(test_batch, dict):
self.assertNotIn("extra_unwanted_column", test_batch)
if isinstance(test_batch_labels, dict):
self.assertNotIn("extra_unwanted_column", test_batch_labels)
model.compile(optimizer="sgd", run_eagerly=True)
model.train_on_batch(test_batch, test_batch_labels)
def _test_xla_generate(self, **generate_kwargs):
def _generate_and_check_results(model, inputs_dict):
if "input_ids" in inputs_dict:
inputs = inputs_dict["input_ids"]
# make sure there are no pad tokens in prompt, which may trigger unwanted behavior
if model.generation_config.pad_token_id is not None:
if config.pad_token_id == 0:
new_pad_token = model.generation_config.pad_token_id + 1
else:
new_pad_token = model.generation_config.pad_token_id - 1
else:
new_pad_token = None
inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token)
elif "input_features" in inputs_dict:
inputs = inputs_dict["input_features"]
else:
raise ValueError("No valid generate input found in inputs_dict")
generated = model.generate(inputs, **generate_kwargs).numpy()
generate_xla = tf.function(model.generate, jit_compile=True)
generated_xla = generate_xla(inputs, **generate_kwargs).numpy()
            # Due to numerical instability, fail the test only if more than 10% of the input sequences give
            # different outputs between the XLA and non-XLA versions. If there are fewer than 10 examples,
            # be strict and do not allow any difference.
diff = [[], []]
for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()):
if _generated != _generated_xla:
diff[0].append(_generated)
diff[1].append(_generated_xla)
ratio = len(diff[0]) / len(generated)
if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10):
self.assertListEqual(diff[0], diff[1])
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.eos_token_id = None # Generate until max length
config.do_sample = False
# fix config for models with additional sequence-length limiting settings
for var_name in ["max_position_embeddings", "max_target_positions"]:
attr = getattr(config, var_name, None)
if attr is not None and attr < generate_kwargs["max_new_tokens"]:
try:
setattr(config, var_name, generate_kwargs["max_new_tokens"])
except NotImplementedError:
# xlnet will raise an exception when trying to set
# max_position_embeddings.
pass
model = model_class(config)
if model.supports_xla_generation:
_generate_and_check_results(model, inputs_dict)
else:
with self.assertRaises(ValueError):
_generate_and_check_results(model, inputs_dict)
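    # Note on `_test_xla_generate` above: the XLA path is exercised by compiling `generate` with
    # `tf.function(..., jit_compile=True)` and comparing its tokens against eager generation, roughly:
    #   xla_generate = tf.function(model.generate, jit_compile=True)
    #   xla_ids = xla_generate(input_ids, **generate_kwargs)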
def test_xla_generate_fast(self):
"""
Basic quick test for generate-compatible classes that confirms that XLA-generated tokens are the same as their
non XLA counterparts.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3)
@slow
def test_xla_generate_contrastive(self):
"""
Slow and challenging version of `test_xla_generate_fast` for contrastive search -- contrastive search directly
manipulates the model cache and other outputs, and this test ensures that they are in a valid format that is
also supported by XLA.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4)
@slow
def test_xla_generate_slow(self):
"""
Slow and challenging version of `test_xla_generate_fast` -- this test asks for several long sequences using
beam search, with and without XLA. The two outputs should match, and a failure in this test indicates that the
model may need further analysis if it is to be used for XLA generation.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128)
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
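# Example (illustrative): `ids_tensor((2, 8), vocab_size=10)` returns a tf.int32 tensor of shape (2, 8)
# with values drawn uniformly from [0, 10).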
def random_attention_mask(shape, rng=None, name=None, dtype=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=None, dtype=dtype)
# make sure that at least one token is attended to for each batch
attn_mask = tf.concat([attn_mask[:, :-1], tf.ones_like(attn_mask[:, -1:], dtype=dtype)], axis=-1)
return attn_mask
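# Example (illustrative): `random_attention_mask((2, 8))` returns a (2, 8) mask of 0s and 1s whose last
# column is forced to 1, so every row attends to at least one token.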
def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
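# Example (illustrative): `floats_tensor((2, 3), scale=2.0)` returns a tf.float32 tensor of shape (2, 3)
# with values sampled uniformly from [0.0, 2.0).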
| 0 |
hf_public_repos/transformers | hf_public_repos/transformers/tests/test_image_transforms.py | # coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from transformers.testing_utils import require_flax, require_tf, require_torch, require_vision
from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
if is_flax_available():
import jax
if is_vision_available():
import PIL.Image
from transformers.image_transforms import (
center_crop,
center_to_corners_format,
convert_to_rgb,
corners_to_center_format,
flip_channel_order,
get_resize_output_image_size,
id_to_rgb,
normalize,
pad,
resize,
rgb_to_id,
to_channel_dimension_format,
to_pil_image,
)
def get_random_image(height, width, num_channels=3, channels_first=True):
shape = (num_channels, height, width) if channels_first else (height, width, num_channels)
random_array = np.random.randint(0, 256, shape, dtype=np.uint8)
return random_array
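# Example (illustrative): `get_random_image(16, 32)` returns a uint8 array of shape (3, 16, 32);
# with `channels_first=False` the shape would be (16, 32, 3).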
@require_vision
class ImageTransformsTester(unittest.TestCase):
@parameterized.expand(
[
("numpy_float_channels_first", (3, 4, 5), np.float32),
("numpy_float_channels_last", (4, 5, 3), np.float32),
("numpy_float_channels_first", (3, 4, 5), np.float64),
("numpy_float_channels_last", (4, 5, 3), np.float64),
("numpy_int_channels_first", (3, 4, 5), np.int32),
("numpy_uint_channels_first", (3, 4, 5), np.uint8),
]
)
@require_vision
def test_to_pil_image(self, name, image_shape, dtype):
image = np.random.randint(0, 256, image_shape).astype(dtype)
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
# make sure image is correctly rescaled
self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0)
@parameterized.expand(
[
("numpy_float_channels_first", (3, 4, 5), np.float32),
("numpy_float_channels_first", (3, 4, 5), np.float64),
("numpy_float_channels_last", (4, 5, 3), np.float32),
("numpy_float_channels_last", (4, 5, 3), np.float64),
]
)
@require_vision
def test_to_pil_image_from_float(self, name, image_shape, dtype):
image = np.random.rand(*image_shape).astype(dtype)
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
# make sure image is correctly rescaled
self.assertTrue(np.abs(np.asarray(pil_image)).sum() > 0)
# Make sure that an exception is raised if image is not in [0, 1]
image = np.random.randn(*image_shape).astype(dtype)
with self.assertRaises(ValueError):
to_pil_image(image)
@require_vision
def test_to_pil_image_from_mask(self):
# Make sure binary mask remains a binary mask
image = np.random.randint(0, 2, (3, 4, 5)).astype(np.uint8)
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
np_img = np.asarray(pil_image)
self.assertTrue(np_img.min() == 0)
self.assertTrue(np_img.max() == 1)
image = np.random.randint(0, 2, (3, 4, 5)).astype(np.float32)
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
np_img = np.asarray(pil_image)
self.assertTrue(np_img.min() == 0)
self.assertTrue(np_img.max() == 1)
@require_tf
def test_to_pil_image_from_tensorflow(self):
# channels_first
image = tf.random.uniform((3, 4, 5))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
# channels_last
image = tf.random.uniform((4, 5, 3))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
@require_torch
def test_to_pil_image_from_torch(self):
# channels first
image = torch.rand((3, 4, 5))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
# channels last
image = torch.rand((4, 5, 3))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
@require_flax
def test_to_pil_image_from_jax(self):
key = jax.random.PRNGKey(0)
# channel first
image = jax.random.uniform(key, (3, 4, 5))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
# channel last
image = jax.random.uniform(key, (4, 5, 3))
pil_image = to_pil_image(image)
self.assertIsInstance(pil_image, PIL.Image.Image)
self.assertEqual(pil_image.size, (5, 4))
def test_to_channel_dimension_format(self):
# Test that function doesn't reorder if channel dim matches the input.
image = np.random.rand(3, 4, 5)
image = to_channel_dimension_format(image, "channels_first")
self.assertEqual(image.shape, (3, 4, 5))
image = np.random.rand(4, 5, 3)
image = to_channel_dimension_format(image, "channels_last")
self.assertEqual(image.shape, (4, 5, 3))
# Test that function reorders if channel dim doesn't match the input.
image = np.random.rand(3, 4, 5)
image = to_channel_dimension_format(image, "channels_last")
self.assertEqual(image.shape, (4, 5, 3))
image = np.random.rand(4, 5, 3)
image = to_channel_dimension_format(image, "channels_first")
self.assertEqual(image.shape, (3, 4, 5))
# Can pass in input_data_format and works if data format is ambiguous or unknown.
image = np.random.rand(4, 5, 6)
image = to_channel_dimension_format(image, "channels_first", input_channel_dim="channels_last")
self.assertEqual(image.shape, (6, 4, 5))
def test_get_resize_output_image_size(self):
image = np.random.randint(0, 256, (3, 224, 224))
# Test the output size defaults to (x, x) if an int is given.
self.assertEqual(get_resize_output_image_size(image, 10), (10, 10))
self.assertEqual(get_resize_output_image_size(image, [10]), (10, 10))
self.assertEqual(get_resize_output_image_size(image, (10,)), (10, 10))
# Test the output size is the same as the input if a two element tuple/list is given.
self.assertEqual(get_resize_output_image_size(image, (10, 20)), (10, 20))
self.assertEqual(get_resize_output_image_size(image, [10, 20]), (10, 20))
self.assertEqual(get_resize_output_image_size(image, (10, 20), default_to_square=True), (10, 20))
# To match pytorch behaviour, max_size is only relevant if size is an int
self.assertEqual(get_resize_output_image_size(image, (10, 20), max_size=5), (10, 20))
# Test output size = (int(size * height / width), size) if size is an int and height > width
image = np.random.randint(0, 256, (3, 50, 40))
self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (25, 20))
# Test output size = (size, int(size * width / height)) if size is an int and width <= height
image = np.random.randint(0, 256, (3, 40, 50))
self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False), (20, 25))
# Test size is resized if longer size > max_size
image = np.random.randint(0, 256, (3, 50, 40))
self.assertEqual(get_resize_output_image_size(image, 20, default_to_square=False, max_size=22), (22, 17))
# Test output size = (int(size * height / width), size) if size is an int and height > width and
# input has 4 channels
image = np.random.randint(0, 256, (4, 50, 40))
self.assertEqual(
get_resize_output_image_size(image, 20, default_to_square=False, input_data_format="channels_first"),
(25, 20),
)
        # Test the correct channel dimension is returned if the output height == 3
# Defaults to input format - channels first
image = np.random.randint(0, 256, (3, 18, 97))
resized_image = resize(image, (3, 20))
self.assertEqual(resized_image.shape, (3, 3, 20))
# Defaults to input format - channels last
image = np.random.randint(0, 256, (18, 97, 3))
resized_image = resize(image, (3, 20))
self.assertEqual(resized_image.shape, (3, 20, 3))
image = np.random.randint(0, 256, (3, 18, 97))
resized_image = resize(image, (3, 20), data_format="channels_last")
self.assertEqual(resized_image.shape, (3, 20, 3))
image = np.random.randint(0, 256, (18, 97, 3))
resized_image = resize(image, (3, 20), data_format="channels_first")
self.assertEqual(resized_image.shape, (3, 3, 20))
def test_resize(self):
image = np.random.randint(0, 256, (3, 224, 224))
# Check the channel order is the same by default
resized_image = resize(image, (30, 40))
self.assertIsInstance(resized_image, np.ndarray)
self.assertEqual(resized_image.shape, (3, 30, 40))
# Check channel order is changed if specified
resized_image = resize(image, (30, 40), data_format="channels_last")
self.assertIsInstance(resized_image, np.ndarray)
self.assertEqual(resized_image.shape, (30, 40, 3))
# Check PIL.Image.Image is returned if return_numpy=False
resized_image = resize(image, (30, 40), return_numpy=False)
self.assertIsInstance(resized_image, PIL.Image.Image)
# PIL size is in (width, height) order
self.assertEqual(resized_image.size, (40, 30))
# Check an image with float values between 0-1 is returned with values in this range
image = np.random.rand(3, 224, 224)
resized_image = resize(image, (30, 40))
self.assertIsInstance(resized_image, np.ndarray)
self.assertEqual(resized_image.shape, (3, 30, 40))
self.assertTrue(np.all(resized_image >= 0))
self.assertTrue(np.all(resized_image <= 1))
# Check that an image with 4 channels is resized correctly
image = np.random.randint(0, 256, (4, 224, 224))
resized_image = resize(image, (30, 40), input_data_format="channels_first")
self.assertIsInstance(resized_image, np.ndarray)
self.assertEqual(resized_image.shape, (4, 30, 40))
def test_normalize(self):
image = np.random.randint(0, 256, (224, 224, 3)) / 255
# Test that exception is raised if inputs are incorrect
# Not a numpy array image
with self.assertRaises(ValueError):
normalize(5, 5, 5)
# Number of mean values != number of channels
with self.assertRaises(ValueError):
normalize(image, mean=(0.5, 0.6), std=1)
# Number of std values != number of channels
with self.assertRaises(ValueError):
normalize(image, mean=1, std=(0.5, 0.6))
# Test result is correct - output data format is channels_first and normalization
# correctly computed
mean = (0.5, 0.6, 0.7)
std = (0.1, 0.2, 0.3)
expected_image = ((image - mean) / std).transpose((2, 0, 1))
normalized_image = normalize(image, mean=mean, std=std, data_format="channels_first")
self.assertIsInstance(normalized_image, np.ndarray)
self.assertEqual(normalized_image.shape, (3, 224, 224))
self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6))
# Test image with 4 channels is normalized correctly
image = np.random.randint(0, 256, (224, 224, 4)) / 255
mean = (0.5, 0.6, 0.7, 0.8)
std = (0.1, 0.2, 0.3, 0.4)
expected_image = (image - mean) / std
self.assertTrue(
np.allclose(
normalize(image, mean=mean, std=std, input_data_format="channels_last"), expected_image, atol=1e-6
)
)
# Test float32 image input keeps float32 dtype
image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32) / 255
mean = (0.5, 0.6, 0.7)
std = (0.1, 0.2, 0.3)
expected_image = ((image - mean) / std).astype(np.float32)
normalized_image = normalize(image, mean=mean, std=std)
self.assertEqual(normalized_image.dtype, np.float32)
self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6))
# Test float16 image input keeps float16 dtype
image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float16) / 255
mean = (0.5, 0.6, 0.7)
std = (0.1, 0.2, 0.3)
# The mean and std are cast to match the dtype of the input image
cast_mean = np.array(mean, dtype=np.float16)
cast_std = np.array(std, dtype=np.float16)
expected_image = (image - cast_mean) / cast_std
normalized_image = normalize(image, mean=mean, std=std)
self.assertEqual(normalized_image.dtype, np.float16)
self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6))
# Test int image input is converted to float32
image = np.random.randint(0, 2, (224, 224, 3), dtype=np.uint8)
mean = (0.5, 0.6, 0.7)
std = (0.1, 0.2, 0.3)
expected_image = (image.astype(np.float32) - mean) / std
normalized_image = normalize(image, mean=mean, std=std)
self.assertEqual(normalized_image.dtype, np.float32)
self.assertTrue(np.allclose(normalized_image, expected_image, atol=1e-6))
def test_center_crop(self):
image = np.random.randint(0, 256, (3, 224, 224))
# Test that exception is raised if inputs are incorrect
with self.assertRaises(ValueError):
center_crop(image, 10)
        # Test result is correct - output data format is channels_last and center crop
        # correctly computed
expected_image = image[:, 52:172, 82:142].transpose(1, 2, 0)
cropped_image = center_crop(image, (120, 60), data_format="channels_last")
self.assertIsInstance(cropped_image, np.ndarray)
self.assertEqual(cropped_image.shape, (120, 60, 3))
self.assertTrue(np.allclose(cropped_image, expected_image))
# Test that image is padded with zeros if crop size is larger than image size
expected_image = np.zeros((300, 260, 3))
expected_image[38:262, 18:242, :] = image.transpose((1, 2, 0))
cropped_image = center_crop(image, (300, 260), data_format="channels_last")
self.assertIsInstance(cropped_image, np.ndarray)
self.assertEqual(cropped_image.shape, (300, 260, 3))
self.assertTrue(np.allclose(cropped_image, expected_image))
# Test image with 4 channels is cropped correctly
image = np.random.randint(0, 256, (224, 224, 4))
expected_image = image[52:172, 82:142, :]
self.assertTrue(np.allclose(center_crop(image, (120, 60), input_data_format="channels_last"), expected_image))
def test_center_to_corners_format(self):
bbox_center = np.array([[10, 20, 4, 8], [15, 16, 3, 4]])
expected = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]])
self.assertTrue(np.allclose(center_to_corners_format(bbox_center), expected))
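        # (for reference: the conversion is (cx, cy, w, h) -> (cx - w/2, cy - h/2, cx + w/2, cy + h/2),
        #  e.g. (10, 20, 4, 8) -> (8, 16, 12, 24) as in the first row above)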
# Check that the function and inverse function are inverse of each other
self.assertTrue(np.allclose(corners_to_center_format(center_to_corners_format(bbox_center)), bbox_center))
def test_corners_to_center_format(self):
bbox_corners = np.array([[8, 16, 12, 24], [13.5, 14, 16.5, 18]])
expected = np.array([[10, 20, 4, 8], [15, 16, 3, 4]])
self.assertTrue(np.allclose(corners_to_center_format(bbox_corners), expected))
# Check that the function and inverse function are inverse of each other
self.assertTrue(np.allclose(center_to_corners_format(corners_to_center_format(bbox_corners)), bbox_corners))
def test_rgb_to_id(self):
# test list input
rgb = [125, 4, 255]
self.assertEqual(rgb_to_id(rgb), 16712829)
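        # (the panoptic encoding packs channels as id = R + 256 * G + 256**2 * B,
        #  so 125 + 256 * 4 + 65536 * 255 == 16712829)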
# test numpy array input
color = np.array(
[
[
[213, 54, 165],
[88, 207, 39],
[156, 108, 128],
],
[
[183, 194, 46],
[137, 58, 88],
[114, 131, 233],
],
]
)
expected = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]])
self.assertTrue(np.allclose(rgb_to_id(color), expected))
def test_id_to_rgb(self):
# test int input
self.assertEqual(id_to_rgb(16712829), [125, 4, 255])
# test array input
id_array = np.array([[10827477, 2608984, 8416412], [3064503, 5782153, 15303538]])
color = np.array(
[
[
[213, 54, 165],
[88, 207, 39],
[156, 108, 128],
],
[
[183, 194, 46],
[137, 58, 88],
[114, 131, 233],
],
]
)
self.assertTrue(np.allclose(id_to_rgb(id_array), color))
def test_pad(self):
# fmt: off
image = np.array([[
[0, 1],
[2, 3],
]])
# fmt: on
# Test that exception is raised if unknown padding mode is specified
with self.assertRaises(ValueError):
pad(image, 10, mode="unknown")
# Test that exception is raised if invalid padding is specified
with self.assertRaises(ValueError):
# Cannot pad on channel dimension
pad(image, (5, 10, 10))
        # Test image is padded equally on all sides if padding is an int
# fmt: off
expected_image = np.array([
[[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 2, 3, 0],
[0, 0, 0, 0]],
])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, 1)))
# Test the left and right of each axis is padded (pad_left, pad_right)
# fmt: off
expected_image = np.array(
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 2, 3, 0],
[0, 0, 0, 0, 0]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, (2, 1))))
# Test only one axis is padded (pad_left, pad_right)
# fmt: off
expected_image = np.array([[
[9, 9],
[9, 9],
[0, 1],
[2, 3],
[9, 9]
]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, ((2, 1), (0, 0)), constant_values=9)))
        # Test padding with per-axis constant values: ((6, 7), (8, 9)) pads the height axis with 6 before / 7 after and the width axis with 8 before / 9 after
# fmt: off
expected_image = np.array([[
[8, 8, 0, 1, 9],
[8, 8, 2, 3, 9],
[8, 8, 7, 7, 9],
[8, 8, 7, 7, 9]
]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), constant_values=((6, 7), (8, 9)))))
# fmt: off
image = np.array([[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]])
# fmt: on
# Test padding with PaddingMode.REFLECT
# fmt: off
expected_image = np.array([[
[2, 1, 0, 1, 2, 1],
[5, 4, 3, 4, 5, 4],
[8, 7, 6, 7, 8, 7],
[5, 4, 3, 4, 5, 4],
[2, 1, 0, 1, 2, 1],
]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect")))
# Test padding with PaddingMode.REPLICATE
# fmt: off
expected_image = np.array([[
[0, 0, 0, 1, 2, 2],
[3, 3, 3, 4, 5, 5],
[6, 6, 6, 7, 8, 8],
[6, 6, 6, 7, 8, 8],
[6, 6, 6, 7, 8, 8],
]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="replicate")))
# Test padding with PaddingMode.SYMMETRIC
# fmt: off
expected_image = np.array([[
[1, 0, 0, 1, 2, 2],
[4, 3, 3, 4, 5, 5],
[7, 6, 6, 7, 8, 8],
[7, 6, 6, 7, 8, 8],
[4, 3, 3, 4, 5, 5],
]])
# fmt: on
self.assertTrue(np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="symmetric")))
# Test we can specify the output data format
# Test padding with PaddingMode.REFLECT
# fmt: off
image = np.array([[
[0, 1],
[2, 3],
]])
expected_image = np.array([
[[0], [1], [0], [1], [0]],
[[2], [3], [2], [3], [2]],
[[0], [1], [0], [1], [0]],
[[2], [3], [2], [3], [2]]
])
# fmt: on
self.assertTrue(
np.allclose(expected_image, pad(image, ((0, 2), (2, 1)), mode="reflect", data_format="channels_last"))
)
# Test we can pad on an image with 2 channels
# fmt: off
image = np.array([
[[0, 1], [2, 3]],
])
expected_image = np.array([
[[0, 0], [0, 1], [2, 3]],
[[0, 0], [0, 0], [0, 0]],
])
# fmt: on
self.assertTrue(
np.allclose(
expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last")
)
)
@require_vision
def test_convert_to_rgb(self):
# Test that an RGBA image is converted to RGB
image = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.uint8)
pil_image = PIL.Image.fromarray(image)
self.assertEqual(pil_image.mode, "RGBA")
self.assertEqual(pil_image.size, (2, 1))
# For the moment, numpy images are returned as is
rgb_image = convert_to_rgb(image)
self.assertEqual(rgb_image.shape, (1, 2, 4))
self.assertTrue(np.allclose(rgb_image, image))
# And PIL images are converted
rgb_image = convert_to_rgb(pil_image)
self.assertEqual(rgb_image.mode, "RGB")
self.assertEqual(rgb_image.size, (2, 1))
self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[1, 2, 3], [5, 6, 7]]], dtype=np.uint8)))
# Test that a grayscale image is converted to RGB
image = np.array([[0, 255]], dtype=np.uint8)
pil_image = PIL.Image.fromarray(image)
self.assertEqual(pil_image.mode, "L")
self.assertEqual(pil_image.size, (2, 1))
rgb_image = convert_to_rgb(pil_image)
self.assertEqual(rgb_image.mode, "RGB")
self.assertEqual(rgb_image.size, (2, 1))
self.assertTrue(np.allclose(np.array(rgb_image), np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8)))
def test_flip_channel_order(self):
# fmt: off
img_channels_first = np.array([
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7]],
[[ 8, 9, 10, 11],
[12, 13, 14, 15]],
[[16, 17, 18, 19],
[20, 21, 22, 23]],
])
# fmt: on
img_channels_last = np.moveaxis(img_channels_first, 0, -1)
# fmt: off
flipped_img_channels_first = np.array([
[[16, 17, 18, 19],
[20, 21, 22, 23]],
[[ 8, 9, 10, 11],
[12, 13, 14, 15]],
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7]],
])
# fmt: on
flipped_img_channels_last = np.moveaxis(flipped_img_channels_first, 0, -1)
self.assertTrue(np.allclose(flip_channel_order(img_channels_first), flipped_img_channels_first))
self.assertTrue(
np.allclose(flip_channel_order(img_channels_first, "channels_last"), flipped_img_channels_last)
)
self.assertTrue(np.allclose(flip_channel_order(img_channels_last), flipped_img_channels_last))
self.assertTrue(
np.allclose(flip_channel_order(img_channels_last, "channels_first"), flipped_img_channels_first)
)
# Can flip when the image has 2 channels
# fmt: off
img_channels_first = np.array([
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7]],
[[ 8, 9, 10, 11],
[12, 13, 14, 15]],
])
# fmt: on
flipped_img_channels_first = img_channels_first[::-1, :, :]
self.assertTrue(
np.allclose(
flip_channel_order(img_channels_first, input_data_format="channels_first"), flipped_img_channels_first
)
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/benchmark/test_benchmark.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
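        # Illustrative shape of the result dicts walked above (values are made up):
        # {"sshleifer/tiny-gpt2": {"bs": [1], "ss": [8], "result": {1: {8: 0.0023}}}}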
def test_inference_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sgugger/tiny-distilbert-classification"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_torchscript(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
torchscript=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_inference_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
fp16=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_model_no_architectures(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
# set architectures equal to `None`
config.architectures = None
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_train_no_configs_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
fp16=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/benchmark/test_benchmark_tf.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sgugger/tiny-distilbert-classification"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "patrickvonplaten/t5-tiny-random"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
def test_inference_no_configs_xla(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
use_xla=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/deepspeed/ds_config_zero2.json | {
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"contiguous_gradients": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/deepspeed/ds_config_zero3.json | {
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"offload_param": {
"device": "cpu",
"pin_memory": true
},
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/deepspeed/test_deepspeed.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import io
import itertools
import json
import os
import unittest
from copy import deepcopy
from functools import partial
import datasets
from parameterized import parameterized
import tests.trainer.test_trainer
from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa
from transformers import AutoModel, TrainingArguments, is_torch_available, logging
from transformers.integrations.deepspeed import (
HfDeepSpeedConfig,
is_deepspeed_available,
unset_hf_deepspeed_config,
)
from transformers.testing_utils import (
CaptureLogger,
CaptureStd,
CaptureStderr,
LoggingLevel,
TestCasePlus,
backend_device_count,
execute_subprocess_async,
mockenv_context,
require_deepspeed,
require_optuna,
require_torch_accelerator,
require_torch_multi_accelerator,
slow,
torch_device,
)
from transformers.trainer_utils import get_last_checkpoint, set_seed
from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_available_on_device
if is_torch_available():
from tests.trainer.test_trainer import ( # noqa
RegressionModelConfig,
RegressionPreTrainedModel,
)
# hack to restore original logging level pre #21700
get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info")
set_seed(42)
# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"
T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
GPT2_TINY = "sshleifer/tiny-gpt2"
def load_json(path):
with open(path) as f:
return json.load(f)
def get_master_port(real_launcher=False):
"""
When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
the issue is that once the port is tied it can't be used anywhere else outside of this process,
since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
able to run both emulated launcher and normal launcher tests we need 2 distinct ports.
This function will give the right port in the right context. For real launcher it'll give the
base port, for emulated launcher it'll give the base port + 1. In both cases a string is
returned.
Args:
`real_launcher`: whether a real launcher is going to be used, or the emulated one
"""
master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
if not real_launcher:
master_port_base = str(int(master_port_base) + 1)
return master_port_base
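# For illustration (assuming DS_TEST_PORT is unset): get_master_port(real_launcher=True)
# returns "10999", while get_master_port(real_launcher=False) returns "11000".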
def require_deepspeed_aio(test_case):
"""
Decorator marking a test that requires deepspeed aio (nvme)
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
import deepspeed
from deepspeed.ops.aio import AsyncIOBuilder
if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
return unittest.skip("test requires deepspeed async-io")(test_case)
else:
return test_case
if is_deepspeed_available():
from deepspeed.utils import logger as deepspeed_logger # noqa
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
from transformers.integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled # noqa
def get_launcher(distributed=False):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
    #    results with more gpus because we use very little data)
num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1
master_port = get_master_port(real_launcher=True)
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split()
ZERO2 = "zero2"
ZERO3 = "zero3"
FP16 = "fp16"
BF16 = "bf16"
HF_OPTIM = "hf_optim"
HF_SCHEDULER = "hf_scheduler"
DS_OPTIM = "ds_optim"
DS_SCHEDULER = "ds_scheduler"
optims = [HF_OPTIM, DS_OPTIM]
schedulers = [HF_SCHEDULER, DS_SCHEDULER]
stages = [ZERO2, ZERO3]
if is_torch_bf16_available_on_device(torch_device):
dtypes = [FP16, BF16]
else:
dtypes = [FP16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian product of ZeRO stages and dtypes to test
params = list(itertools.product(stages, dtypes))
params_with_optims_and_schedulers = list(itertools.product(stages, dtypes, optims, schedulers))
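# e.g. when bf16 is supported, params == [("zero2", "fp16"), ("zero2", "bf16"), ("zero3", "fp16"), ("zero3", "bf16")]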
@require_deepspeed
@require_torch_accelerator
class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
"""
Testing non-Trainer DeepSpeed integration
"""
def setUp(self):
super().setUp()
master_port = get_master_port(real_launcher=False)
self.dist_env_1_gpu = {
"MASTER_ADDR": "localhost",
"MASTER_PORT": master_port,
"RANK": "0",
"LOCAL_RANK": "0",
"WORLD_SIZE": "1",
}
def tearDown(self):
super().tearDown()
# reset the ds config global so that tests state doesn't leak
unset_hf_deepspeed_config()
def test_init_zero3_fp16(self):
# test that zero.Init() works correctly under zero3/fp16
ds_config = {
"train_batch_size": 1,
"zero_optimization": {
"stage": 3,
},
}
dschf = HfDeepSpeedConfig(ds_config)
self.assertTrue(dschf.is_zero3())
self.assertTrue(is_deepspeed_zero3_enabled())
with LoggingLevel(logging.INFO):
with mockenv_context(**self.dist_env_1_gpu):
logger = logging.get_logger("transformers.modeling_utils")
with CaptureLogger(logger) as cl:
AutoModel.from_pretrained(T5_TINY)
self.assertIn("Detected DeepSpeed ZeRO-3", cl.out)
# now remove zero optimization
del ds_config["zero_optimization"]
dschf = HfDeepSpeedConfig(ds_config)
self.assertFalse(dschf.is_zero3())
self.assertFalse(is_deepspeed_zero3_enabled())
with LoggingLevel(logging.INFO):
with mockenv_context(**self.dist_env_1_gpu):
logger = logging.get_logger("transformers.modeling_utils")
with CaptureLogger(logger) as cl:
AutoModel.from_pretrained(T5_TINY)
self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out)
class TrainerIntegrationDeepSpeedWithCustomConfig(TestCasePlus):
def setUp(self):
super().setUp()
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
master_port = get_master_port(real_launcher=False)
self.dist_env_1_gpu = {
"MASTER_ADDR": "localhost",
"MASTER_PORT": master_port,
"RANK": "0",
"LOCAL_RANK": "0",
"WORLD_SIZE": "1",
}
self.ds_config_file = {
"zero2": f"{self.test_file_dir_str}/ds_config_zero2.json",
"zero3": f"{self.test_file_dir_str}/ds_config_zero3.json",
}
# use self.get_config_dict(stage) to use these to ensure the original is not modified
with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
config_zero2 = json.load(f)
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
config_zero3 = json.load(f)
# The following setting slows things down, so don't enable it by default unless needed by a test.
# It's in the file as a demo for users since we want everything to work out of the box even if slower.
config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False
self.ds_config_dict = {
"zero2": config_zero2,
"zero3": config_zero3,
}
def tearDown(self):
super().tearDown()
# reset the ds config global so that tests state doesn't leak
unset_hf_deepspeed_config()
def get_config_dict(self, stage):
# As some tests modify the dict, always make a copy
return deepcopy(self.ds_config_dict[stage])
@require_deepspeed
@require_torch_accelerator
class TrainerIntegrationDeepSpeed(TrainerIntegrationDeepSpeedWithCustomConfig, TrainerIntegrationCommon):
"""
This class is for testing directly via get_regression_trainer
It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
which we can re-use here.
Important: this class' setup can only work with a single gpu because it runs within the current
pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.
Note: if any of the tests of this class get run there will be at least one gpu occupied by them
until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels
won't be released until this pytest worker exits.
This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new
processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This
is not a bug.
"""
# --- These tests are enough to run on one of zero stages --- #
def test_hf_ds_config_mismatch(self):
ds_config = self.get_config_dict(ZERO2)
# Purposefully configure these values to mismatch TrainingArguments values.
# This currently doesn't cover all keys (but it could)
per_device_train_batch_size = 2
ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2
ds_config["train_batch_size"] = 1000
gradient_accumulation_steps = 2
ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2
max_grad_norm = 1.0
ds_config["gradient_clipping"] = max_grad_norm + 0.1
adam_beta1, adam_beta2 = 0.9, 0.99
ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1]
fp16 = True
ds_config["fp16"]["enabled"] = not fp16
keys = [
"per_device_train_batch_size",
"train_batch_size",
"gradient_accumulation_steps",
"max_grad_norm",
"betas",
"fp16",
]
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(
local_rank=0,
fp16=fp16,
deepspeed=ds_config,
per_device_train_batch_size=per_device_train_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
max_grad_norm=max_grad_norm,
adam_beta1=adam_beta1,
adam_beta2=adam_beta2,
)
with self.assertRaises(Exception) as context:
trainer.train()
for key in keys:
self.assertTrue(
key in str(context.exception),
f"{key} is not in the exception message:\n{context.exception}",
)
# Test various combos
# 1. DS scheduler + DS optimizer: this is already tested by most other tests
# 2. HF scheduler + HF optimizer:
# 3. DS scheduler + HF optimizer:
# 4. HF scheduler + DS optimizer:
def test_hf_scheduler_hf_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer
del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
def test_ds_scheduler_hf_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
def test_hf_scheduler_ds_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
@require_deepspeed_aio
def test_stage3_nvme_offload(self):
with mockenv_context(**self.dist_env_1_gpu):
# this actually doesn't have to be on NVMe, any storage will do since this test only
# runs a simple check that we can use some directory as if it were NVMe
nvme_path = self.get_auto_remove_tmp_dir()
nvme_config = {"device": "nvme", "nvme_path": nvme_path}
ds_config_zero3_dict = self.get_config_dict(ZERO3)
ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config
ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config
trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@require_optuna
def test_hyperparameter_search(self):
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero3_dict = self.get_config_dict(ZERO3)
# hyperparameter_search requires model_init() to recreate the model for each trial
def model_init():
config = RegressionModelConfig(a=0, b=0, double_output=False)
model = RegressionPreTrainedModel(config)
return model
trainer = get_regression_trainer(
local_rank=0,
fp16=True,
model_init=model_init,
deepspeed=ds_config_zero3_dict,
)
n_trials = 3
with CaptureLogger(deepspeed_logger) as cl:
with CaptureStd() as cs:
trainer.hyperparameter_search(direction="maximize", n_trials=n_trials)
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
self.assertIn(f"Trial {n_trials-1} finished with value", cs.err, "expected hyperparameter_search output")
self.assertIn("Best is trial", cs.err, "expected hyperparameter_search output")
# --- These tests need to run on both zero stages --- #
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_hf_optimizer_with_offload(self, stage, dtype):
        # non-DS optimizers can be used with ZeRO-offload, as long as they have both CPU and GPU implementations (except LAMB)
ds_config_dict = self.get_config_dict(stage)
del ds_config_dict["optimizer"] # force default HF Trainer optimizer
# force cpu offload
ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu"
ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {"local_rank": 0, "deepspeed": ds_config_dict}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_fake_notebook_no_launcher(self, stage, dtype):
# this setup emulates a notebook where a launcher needs to be emulated by hand
# note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture
# DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if
        # it's not run as the first test, as `sys.stdout` will no longer be the same. So we either have
# to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger.
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {"local_rank": 0, "deepspeed": self.get_config_dict(stage)}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_early_get_last_lr(self, stage, dtype):
# with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
# not run for the first few dozen steps while loss scale is too large, and thus during
# that time `get_last_lr` will fail if called during that warm up stage,
#
# setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
# `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
with mockenv_context(**self.dist_env_1_gpu):
a = b = 0.0
kwargs = {
"a": a,
"b": b,
"local_rank": 0,
"train_len": 8,
"deepspeed": self.get_config_dict(stage),
"per_device_train_batch_size": 8,
"logging_steps": 1,
}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
trainer.train()
post_train_a = trainer.model.a.item()
            # XXX: for some reason the following check fails with zero3/fp16 and any/bf16 - not
            # broken, but a different qualitative outcome - as if the optimizer did run
# oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere
# print(trainer.model.a.item())
# print(trainer.model.b.item())
# need to investigate at some point
if (stage == ZERO3 and dtype == FP16) or (dtype == BF16):
return
# it's enough that train didn't fail for this test, but we must check that
# optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
self.assertEqual(post_train_a, a)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_gradient_accumulation(self, stage, dtype):
# this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=16, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=4
        # since the 2nd should produce the same effective batch (4 * 4 == 16 * 1) as the 1st, with the same results
#
# I can get an identical loss for a small train_len=32, plus the power of the initial
# dynamic loss scale value set to:
# "fp16.initial_scale_power": 1
# plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
        # but for some reason going to train_len=64 the weights start to mismatch with this setup.
# the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
train_len = 64
a = b = 0.0
kwargs = {
"a": a,
"b": b,
"local_rank": 0,
"train_len": train_len,
"deepspeed": self.get_config_dict(stage),
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
no_grad_accum_trainer = get_regression_trainer(
**kwargs,
per_device_train_batch_size=16,
gradient_accumulation_steps=1,
)
no_grad_accum_result = no_grad_accum_trainer.train()
no_grad_accum_loss = no_grad_accum_result.training_loss
no_grad_accum_a = no_grad_accum_trainer.model.a.item()
no_grad_accum_b = no_grad_accum_trainer.model.b.item()
# make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
self.assertNotEqual(no_grad_accum_a, a)
with mockenv_context(**self.dist_env_1_gpu):
yes_grad_accum_trainer = get_regression_trainer(
**kwargs,
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
)
yes_grad_accum_result = yes_grad_accum_trainer.train()
yes_grad_accum_loss = yes_grad_accum_result.training_loss
yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
self.assertNotEqual(yes_grad_accum_a, a)
        # training with a quarter of the batch size but 4 accumulation steps should give the same
        # weights, but sometimes there is still a slight difference of ~1e-6
self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5)
self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5)
# see the note above how to get identical loss on a small bs
self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=2)
def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtype):
# adapted from TrainerIntegrationCommon.check_saved_checkpoints
file_list = [SAFE_WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]
if stage == ZERO2:
ds_file_list = ["mp_rank_00_model_states.pt"]
elif stage == ZERO3:
ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
else:
raise ValueError(f"unknown stage {stage}")
if dtype == "bf16":
ds_file_list.append("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")
# common files
for filename in file_list:
path = os.path.join(checkpoint, filename)
self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
# ds files
ds_path = os.path.join(checkpoint, f"global_step{step}")
for filename in ds_file_list:
# filename = os.path.join(path, filename)
# print(filename)
path = os.path.join(ds_path, filename)
self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_save_checkpoints(self, stage, dtype):
# adapted from TrainerIntegrationTest.test_save_checkpoints
freq = 5
output_dir = self.get_auto_remove_tmp_dir()
ds_config_dict = self.get_config_dict(stage)
if dtype == FP16:
ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
# XXX:
if stage == ZERO3:
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
# save checkpoints
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {
"output_dir": output_dir,
"save_steps": freq,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
trainer.train()
total = int(self.n_epochs * 64 / self.batch_size)
self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage, dtype)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_can_resume_training_errors(self, stage, dtype):
with mockenv_context(**self.dist_env_1_gpu):
ds_config_dict = self.get_config_dict(stage)
output_dir = self.get_auto_remove_tmp_dir()
kwargs = {"output_dir": output_dir, "deepspeed": ds_config_dict}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
# 1. fail to find any checkpoint - due a fresh output_dir
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=True)
self.assertTrue(
"No valid checkpoint found in output directory" in str(context.exception),
f"got exception: {context.exception}",
)
# 2. fail to find a bogus checkpoint
with self.assertRaises(Exception) as context:
checkpoint = os.path.join(output_dir, "checkpoint-5")
trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
self.assertTrue(
"Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
)
@parameterized.expand(params_with_optims_and_schedulers, name_func=parameterized_custom_name_func)
def test_can_resume_training_normal(self, stage, dtype, optim, scheduler):
# adapted from TrainerIntegrationTest.test_can_resume_training
# test normal resume for each stage separately, error-handling is tested in a different test
# ToDo: Currently, hf_optim + hf_scheduler resumes with the correct states and
        # also has the same losses for a few steps but then slowly diverges. Need to figure it out.
if optim == HF_OPTIM and scheduler == HF_SCHEDULER:
return
output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
ds_config_dict = self.get_config_dict(stage)
if dtype == FP16:
ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
# XXX:
if stage == ZERO3:
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
if optim == HF_OPTIM:
del ds_config_dict["optimizer"]
if scheduler == HF_SCHEDULER:
del ds_config_dict["scheduler"]
kwargs = {
"output_dir": output_dir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(output_dir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(output_dir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Finally, should be able to resume with the same trainer/same deepspeed engine instance
            # XXX: but currently this is not possible due to a DS bug: https://github.com/microsoft/DeepSpeed/issues/1612
# trainer.train(resume_from_checkpoint=checkpoint)
# a workaround needs to be used that re-creates the deepspeed engine
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_load_state_dict_from_zero_checkpoint(self, stage, dtype):
# test that we can load fp32 weights directly from the zero checkpoint into the current model
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False)
ds_config_dict = self.get_config_dict(stage)
kwargs = {
"output_dir": output_dir,
"train_len": 4,
"per_device_train_batch_size": 4,
"num_train_epochs": 1,
"save_strategy": "steps",
"save_steps": 1,
"learning_rate": 0.1,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint_dir = get_last_checkpoint(output_dir)
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
(a1, b1) = model.a.item(), model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_config_object(self):
# test that we can switch from zero2 to zero3 in the same process for example
# test is_zero, etc.
output_dir = self.get_auto_remove_tmp_dir()
kwargs = {"output_dir": output_dir, "train_len": 8, "fp16": True}
ds_config_zero3_dict = self.get_config_dict(ZERO3)
ds_config_zero2_dict = self.get_config_dict(ZERO2)
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
self.assertTrue(is_deepspeed_zero3_enabled())
# test we can repeat that and with train this time
trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
trainer.train()
self.assertTrue(is_deepspeed_zero3_enabled())
# test zero3 is disabled
trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs)
self.assertFalse(is_deepspeed_zero3_enabled())
# check config obj
config = deepspeed_config()
self.assertTrue(bool(config), "Deepspeed config should be accessible")
            # with the accelerate integration, the line below is additionally required for this test to pass
trainer.accelerator.state._reset_state()
del trainer
# now weakref should gc the global and we shouldn't get anything here
config = deepspeed_config()
self.assertFalse(is_deepspeed_zero3_enabled())
self.assertFalse(bool(config), "Deepspeed config should not be accessible")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_load_best_model(self, stage, dtype):
# Test that forced deepspeed reinit doesn't break the model. The forced re-init after
# loading the best model in Trainer is there to work around this bug in Deepspeed
# https://github.com/microsoft/DeepSpeed/issues/1612
#
# The test is derived from a repro script submitted in this Issue:
# https://github.com/huggingface/transformers/issues/17114
#
# One additional feature of this test is that we use a non-AdamW optimizer to test that
# deepspeed doesn't fall back to AdamW, which would prevent the optimizer states from loading
# correctly
from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer # noqa
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False)
ds_config_dict = self.get_config_dict(stage)
del ds_config_dict["optimizer"] # will use HF Trainer optimizer
del ds_config_dict["scheduler"] # will use HF Trainer scheduler
ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam
# must use this setting to get the reload path exercised
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
with mockenv_context(**self.dist_env_1_gpu):
args_dict = {
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 1,
"learning_rate": 1e-4,
"num_train_epochs": 1,
"do_train": True,
"do_eval": True,
"optim": "adafactor",
"evaluation_strategy": "steps",
"eval_steps": 1,
"save_strategy": "steps",
"save_steps": 1,
"load_best_model_at_end": True,
"max_steps": 1,
"deepspeed": ds_config_dict,
"report_to": "none",
}
training_args = TrainingArguments(output_dir, **args_dict)
tokenizer = T5Tokenizer.from_pretrained(T5_TINY)
model = T5ForConditionalGeneration.from_pretrained(T5_TINY)
def _add_eos_to_examples(example):
example["input_text"] = f"question: {example['question']} context: {example['context']}"
example["target_text"] = example["answers"]["text"][0] if len(example["answers"]["text"]) > 0 else ""
return example
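# hypothetical illustration of the mapping above: a sample like
# {"question": "Where is X?", "context": "X is in Y.", "answers": {"text": ["in Y"]}}
# becomes input_text="question: Where is X? context: X is in Y." and target_text="in Y"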
def _convert_to_features(example_batch):
input_encodings = tokenizer.batch_encode_plus(
example_batch["input_text"], pad_to_max_length=True, max_length=512, truncation=True
)
target_encodings = tokenizer.batch_encode_plus(
example_batch["target_text"], pad_to_max_length=True, max_length=16, truncation=True
)
encodings = {
"input_ids": input_encodings["input_ids"],
"attention_mask": input_encodings["attention_mask"],
"labels": target_encodings["input_ids"],
}
return encodings
def get_dataset():
data_file = str(self.tests_dir / "fixtures/tests_samples/SQUAD/sample.json")
data_files = {"train": data_file, "validation": data_file}
raw_datasets = datasets.load_dataset("json", data_files=data_files, field="data")
train_dataset = raw_datasets["train"].map(_add_eos_to_examples).map(_convert_to_features, batched=True)
valid_dataset = deepcopy(train_dataset)
return train_dataset, valid_dataset
train_dataset, eval_dataset = get_dataset()
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train() # crash 1 was here
trainer.evaluate() # crash 2 was here
@slow
@require_deepspeed
@require_torch_accelerator
class TestDeepSpeedWithLauncher(TestCasePlus):
"""This class is for testing via an external script - can do multiple gpus"""
# Tests to devise #
#
# 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
# the 2 gpus will generate prediction sequences that aren't of the same length - this is because
# we had to code a special feature to sync the gpus when the predicted sequences aren't of the
# same length. In general this will be tested as a side-effect through a variety of other tests -
# it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
# long as we have a few full tests running on zero3 + predict_with_generate this should be
# mostly covered.
#
# but there are 5 variations on beam search in `generate` - with identical code branched with `if
# synced_gpus`
#
# 2. most tests should probably be run on both: zero2 and zero3 configs
#
@parameterized.expand(params, name_func=parameterized_custom_name_func)
@require_torch_multi_accelerator
def test_basic_distributed(self, stage, dtype):
self.run_and_check(stage=stage, dtype=dtype, distributed=True)
def test_do_eval_no_train(self):
# testing only zero3 since zero2 makes no sense with inference
self.run_and_check(
stage=ZERO3,
dtype=FP16,
eval_steps=1,
distributed=False,
do_train=False,
do_eval=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_fp32_non_distributed(self, stage, dtype):
# a real model needs too much GPU memory under stage2+fp32, so we use a tiny random model here -
# therefore no quality checks, just basic completion checks are done
self.run_and_check(
stage=stage,
dtype=dtype,
model_name=T5_TINY,
distributed=False,
do_train=True,
do_eval=True,
quality_checks=False,
fp32=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
@require_torch_multi_accelerator
def test_fp32_distributed(self, stage, dtype):
# a real model needs too much GPU memory under stage2+fp32, so we use a tiny random model here -
# therefore no quality checks, just basic completion checks are done
self.run_and_check(
stage=stage,
dtype=dtype,
model_name=T5_TINY,
distributed=True,
do_train=True,
do_eval=True,
quality_checks=False,
fp32=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_resume_train_not_from_ds_checkpoint(self, stage, dtype):
# do normal training and then resume not from the deepspeed checkpoint but explicitly from
# the saved model dir
do_train = True
do_eval = False
kwargs = {
"stage": stage,
"dtype": dtype,
"eval_steps": 1,
"distributed": True,
"do_train": do_train,
"do_eval": do_eval,
}
# 1. normal training
output_dir = self.run_and_check(**kwargs)
# 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
# - i.e. the same path the model was saved to in step 1
output_dir = self.run_trainer(**kwargs, model_name=output_dir)
self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)
@parameterized.expand(["bf16", "fp16", "fp32"])
@require_torch_multi_accelerator
def test_inference(self, dtype):
if dtype == "bf16" and not is_torch_bf16_available_on_device(torch_device):
self.skipTest("test requires bfloat16 hardware support")
# this is just inference, so no optimizer should be loaded
# it only works for z3 (makes no sense with z1-z2)
fp32 = dtype == "fp32"
self.run_and_check(
stage=ZERO3,
dtype=dtype,
model_name=T5_TINY,
distributed=True,
do_train=False,
do_eval=True,
quality_checks=False,
fp32=fp32,
)
def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True):
if do_train:
train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
self.assertIn("train_samples_per_second", train_metrics)
if quality_checks:
self.assertGreater(train_metrics["train_samples_per_second"], 0.5)
if do_eval:
eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
self.assertIn("eval_bleu", eval_metrics)
if quality_checks:
self.assertGreater(eval_metrics["eval_bleu"], 1)
# XXX: need to do better validation beyond just that the run was successful
def run_and_check(
self,
stage,
dtype,
model_name: str = T5_SMALL,
eval_steps: int = 10,
distributed: bool = True,
do_train: bool = True,
do_eval: bool = True,
quality_checks: bool = True,
fp32: bool = False,
extra_args_str: str = None,
remove_args_str: str = None,
):
# we are doing quality testing so using a small real model
output_dir = self.run_trainer(
stage=stage,
dtype=dtype,
model_name=model_name,
eval_steps=eval_steps,
num_train_epochs=1,
do_train=do_train,
do_eval=do_eval,
distributed=distributed,
fp32=fp32,
extra_args_str=extra_args_str,
remove_args_str=remove_args_str,
)
self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks)
return output_dir
def run_trainer(
self,
stage: str,
dtype: str,
model_name: str,
eval_steps: int = 10,
num_train_epochs: int = 1,
do_train: bool = False,
do_eval: bool = True,
distributed: bool = True,
fp32: bool = False,
extra_args_str: str = None,
remove_args_str: str = None,
):
max_len = 32
data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--output_dir {output_dir}
--overwrite_output_dir
--max_source_length {max_len}
--max_target_length {max_len}
--val_max_target_length {max_len}
--warmup_steps 8
--predict_with_generate
--save_steps 0
--eval_steps {eval_steps}
--group_by_length
--label_smoothing_factor 0.1
--source_lang en
--target_lang ro
--report_to none
""".split()
args.extend(["--source_prefix", '"translate English to Romanian: "'])
if not fp32:
args.extend([f"--{dtype}"])
actions = 0
if do_train:
actions += 1
args.extend(
f"""
--do_train
--num_train_epochs {str(num_train_epochs)}
--max_train_samples 16
--per_device_train_batch_size 2
--learning_rate 3e-3
""".split()
)
if do_eval:
actions += 1
args.extend(
"""
--do_eval
--max_eval_samples 16
--per_device_eval_batch_size 2
""".split()
)
assert actions > 0, "need at least do_train or do_eval for the test to run"
if extra_args_str is not None:
args.extend(extra_args_str.split())
# currently only works for bool args
if remove_args_str is not None:
remove_args = remove_args_str.split()
args = [x for x in args if x not in remove_args]
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
launcher = get_launcher(distributed)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
return output_dir
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_clm(self, stage, dtype):
# this test exercises model.resize_token_embeddings() which requires param gathering outside
# of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`
data_dir = self.tests_dir / "fixtures"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_name_or_path {GPT2_TINY}
--train_file {data_dir}/sample_text.txt
--validation_file {data_dir}/sample_text.txt
--output_dir {output_dir}
--overwrite_output_dir
--do_train
--do_eval
--max_train_samples 16
--max_eval_samples 16
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--num_train_epochs 1
--warmup_steps 8
--block_size 64
--report_to none
""".split()
args.extend([f"--{dtype}"])
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
launcher = get_launcher(distributed=True)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
def test_clm_from_config_zero3_fp16(self):
# this test exercises AutoModel.from_config(config) - to ensure zero.Init is called
data_dir = self.tests_dir / "fixtures"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_type gpt2
--tokenizer_name {GPT2_TINY}
--train_file {data_dir}/sample_text.txt
--validation_file {data_dir}/sample_text.txt
--output_dir {output_dir}
--overwrite_output_dir
--do_train
--max_train_samples 4
--per_device_train_batch_size 2
--num_train_epochs 1
--warmup_steps 8
--block_size 8
--fp16
--report_to none
""".split()
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
launcher = get_launcher(distributed=True)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
with CaptureStderr() as cs:
execute_subprocess_async(cmd, env=self.get_env())
self.assertIn("Detected DeepSpeed ZeRO-3", cs.err)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/deepspeed/vit_feature_extractor.json | {
"feature_extractor_type": "ViTFeatureExtractor",
"size": 30
}
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/deepspeed/test_model_zoo.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import subprocess
from os.path import dirname
from parameterized import parameterized
from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa
from transformers import is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_tests_dir,
require_deepspeed,
require_torch_gpu,
slow,
)
from transformers.trainer_utils import set_seed
if is_torch_available():
from tests.trainer.test_trainer import ( # noqa
RegressionModelConfig,
RegressionPreTrainedModel,
get_regression_trainer,
)
set_seed(42)
FIXTURE_DIRECTORY = get_tests_dir("fixtures")
ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir()))
DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__))
# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"
T5_SMALL = "t5-small"
# *** Working Models ***
ALBERT_TINY = "hf-internal-testing/tiny-albert"
BART_TINY = "sshleifer/bart-tiny-random"
BERT_TINY = "hf-internal-testing/tiny-bert"
BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus"
BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird"
BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot"
BLOOM_TINY = "bigscience/bigscience-small-testing"
DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta"
DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2"
DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased"
ELECTRA_TINY = "hf-internal-testing/tiny-electra"
FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert"
FSMT_TINY = "stas/tiny-wmt19-en-de"
FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel"
GPT2_TINY = "sshleifer/tiny-gpt2"
GPTJ_TINY = "hf-internal-testing/tiny-random-gptj"
GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo"
LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm"
LED_TINY = "hf-internal-testing/tiny-random-led"
LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer"
M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
MBART_TINY = "sshleifer/tiny-mbart"
MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert"
MPNET_TINY = "hf-internal-testing/tiny-random-mpnet"
PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random"
PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet"
ROBERTA_TINY = "sshleifer/tiny-distilroberta-base"
SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert"
T5_TINY = "patrickvonplaten/t5-tiny-random"
T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1"
VIT_TINY = "hf-internal-testing/tiny-random-vit"
XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta"
XLNET_TINY = "sshleifer/tiny-xlnet-base-cased"
# *** To Fix ***
# *** tiny model issues ***
# missing model files:
MT5_TINY = "hf-internal-testing/tiny-random-mt5"
CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert"
OPENAI_GPT_TINY = "hf-internal-testing/tiny-random-openai-gpt"
# missing tokenizer files
CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert"
LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2"
HUBERT_TINY = "hf-internal-testing/tiny-random-hubert"
# issues with tokenizer
CTRL_TINY = "hf-internal-testing/tiny-random-ctrl"
TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as ctrl
# other issues with tiny models
IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas
REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas
# *** Lacking official examples to test with ***
# or not working with examples
DPR_TINY = "hf-internal-testing/tiny-random-dpr"
# - "dpr" examples/research_projects/rag-end2end-retriever/
RAG_TINY = "hf-internal-testing/tiny-random-rag"
# - "rag" research_projects
LUKE_TINY = ""
# - "luke" Entities classes - no plan to make such example
LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert"
# - "lxmert" doesn't work with run_qa.py
CLIP_TINY = "hf-internal-testing/tiny-random-clip"
# - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep
SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text"
# - "speech_to_text", nothing under pytorch examples
# *** Reactive mode ***
# models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem
TAPAS_TINY = "hf-internal-testing/tiny-random-tapas"
# additional notes on tapas
# 1. "Table must be of type pd.DataFrame" failure
# TODO: new models to add:
#
def get_launcher(distributed=False):
# 1. explicitly set --num_nodes=1 just in case these tests end up being run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
num_gpus = min(2, get_gpu_count()) if distributed else 1
master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split()
def make_task_cmds():
data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples"
data_dir_wmt = f"{data_dir_samples}/wmt_en_ro"
data_dir_xsum = f"{data_dir_samples}/xsum"
args_main = """
--do_train
--max_train_samples 4
--per_device_train_batch_size 2
--num_train_epochs 1
--fp16
--report_to none
--overwrite_output_dir
""".split()
# try to cover as many models as possible once (it's enough to run on one task per model)
# but need a tiny model for each
#
# should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc.
tasks2models = {
"trans": [
"bart",
"fsmt",
"m2m_100",
"marian",
"mbart",
"t5",
"t5_v1",
# "mt5", missing model files
],
"sum": [
"pegasus",
],
"clm": [
"big_bird",
"bigbird_pegasus",
"blenderbot",
"bloom",
"gpt2",
"gpt_neo",
"gptj",
"xlm-roberta",
"prophetnet",
# "camembert", missing model files
],
"mlm": [
"albert",
"deberta",
"deberta-v2",
"distilbert",
"electra",
"flaubert",
"funnel",
"layoutlm",
# "reformer", # multiple issues with either mlm/qa/clas
],
"qa": [
"led",
"longformer",
"mobilebert",
"mpnet",
"roberta",
"squeezebert",
# "convbert", # missing tokenizer files
# "layoutlmv2", missing model files
],
"clas": [
"bert",
"xlnet",
# "hubert", # missing tokenizer files
# "ibert", # multiple issues with either mlm/qa/clas
# "transfo-xl", # tokenizer issues as ctrl
# "ctrl", # tokenizer issues
# "openai-gpt", missing model files
# "tapas", multiple issues
],
"img_clas": [
"vit",
],
}
scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch"
tasks = {
"trans": f"""
{scripts_dir}/translation/run_translation.py
--train_file {data_dir_wmt}/train.json
--source_lang en
--target_lang ro
""",
"sum": f"""
{scripts_dir}/summarization/run_summarization.py
--train_file {data_dir_xsum}/sample.json
--max_source_length 12
--max_target_length 12
--lang en
""",
"clm": f"""
{scripts_dir}/language-modeling/run_clm.py
--train_file {FIXTURE_DIRECTORY}/sample_text.txt
--block_size 8
""",
"mlm": f"""
{scripts_dir}/language-modeling/run_mlm.py
--train_file {FIXTURE_DIRECTORY}/sample_text.txt
""",
"qa": f"""
{scripts_dir}/question-answering/run_qa.py
--train_file {data_dir_samples}/SQUAD/sample.json
""",
"clas": f"""
{scripts_dir}/text-classification/run_glue.py
--train_file {data_dir_samples}/MRPC/train.csv
--max_seq_length 12
--task_name MRPC
""",
"img_clas": f"""
{scripts_dir}/image-classification/run_image_classification.py
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--remove_unused_columns False
--max_steps 10
--image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json
""",
}
launcher = get_launcher(distributed=True)
cmds = {}
for task, args in tasks.items():
args = args.split()
for model in tasks2models[task]:
model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"]
args_model = f"--model_name_or_path {model_name}".split()
cmds[f"{task}_{model}"] = launcher + args + args_model + args_main
# # generation special case
# if task == "gen":
# launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split()
# args_model += f"--model_type {model}".split()
# cmds[f"{task}_{model}"] = launcher + args + args_model
# else:
return cmds
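# illustrative shape of one entry (assuming 2 gpus): cmds["trans_t5"] is
# launcher + the run_translation.py args + ["--model_name_or_path", T5_TINY] + args_main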
task_cmds = make_task_cmds()
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
# future preparation:
# for now test just fp16, as these tests are quite slow
# FP16 = "fp16"
# BF16 = "bf16"
#
# dtypes = [FP16]
# so just hardcoding --fp16 for now
# if is_torch_bf16_gpu_available():
# dtypes += [BF16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, task_cmds.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedModelZoo(TestCasePlus):
"""This class is for testing via an external script - can do multiple gpus"""
def get_task_cmd(self, task, stage):
# return a ready to run train cmd
if task not in task_cmds:
raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}")
cmd = task_cmds[task]
args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
output_dir = self.get_auto_remove_tmp_dir()
args_out = f"--output_dir {output_dir}".split()
cmd += args_ds + args_out
return cmd, output_dir
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_zero_to_fp32(self, stage, task):
# testing the ability to do a run followed by recovery of full fp32 weights
cmd, output_dir = self.get_task_cmd(task, stage)
# 1. generate the checkpoint
cmd += "--save_steps 1".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
# 2. test that the fp32 weights get reconsolidated
chkpt_dir = f"{output_dir}/checkpoint-1"
recovered_model_path = f"{chkpt_dir}/out.bin"
cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}"
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
subprocess.check_call(cmd, shell=True)
assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found"
# possibly could also test that the resulting saved model is usable but given that we use
# random models we won't know if it's any good
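# a possible extension (not done here): the recovered file is a plain torch state_dict, so it
# could in principle be checked with torch.load(recovered_model_path) followed by
# model.load_state_dict(...)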
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_image_captioning.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir
from .test_tools_common import ToolTesterMixin
if is_vision_available():
from PIL import Image
class ImageCaptioningToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("image-captioning")
self.tool.setup()
self.remote_tool = load_tool("image-captioning", remote=True)
def test_exact_match_arg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image)
self.assertEqual(result, "two cats sleeping on a couch")
def test_exact_match_arg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image)
self.assertEqual(result, "two cats sleeping on a couch")
def test_exact_match_kwarg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image=image)
self.assertEqual(result, "two cats sleeping on a couch")
def test_exact_match_kwarg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image=image)
self.assertEqual(result, "two cats sleeping on a couch")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_text_to_speech.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("text-to-speech")
self.tool.setup()
def test_exact_match_arg(self):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
result = self.tool("hey")
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],
torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
)
)
def test_exact_match_kwarg(self):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
result = self.tool("hey")
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],
torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
)
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_python_interpreter.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
# Fake function we will use as tool
def add_two(x):
return x + 2
class PythonInterpreterTester(unittest.TestCase):
def test_evaluate_assign(self):
code = "x = 3"
state = {}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3})
code = "x = y"
state = {"y": 5}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 5, "y": 5})
def test_evaluate_call(self):
code = "y = add_two(x)"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "y": 5})
# Won't work without the tool
with CaptureStdout() as out:
result = evaluate(code, {}, state=state)
assert result is None
assert "tried to execute add_two" in out.out
def test_evaluate_constant(self):
code = "x = 3"
state = {}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3})
def test_evaluate_dict(self):
code = "test_dict = {'x': x, 'y': add_two(x)}"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
self.assertDictEqual(result, {"x": 3, "y": 5})
self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
def test_evaluate_expression(self):
code = "x = 3\ny = 5"
state = {}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 3, "y": 5})
def test_evaluate_f_string(self):
code = "text = f'This is x: {x}.'"
state = {"x": 3}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
def test_evaluate_if(self):
code = "if x <= 3:\n y = 2\nelse:\n y = 5"
state = {"x": 3}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(state, {"x": 3, "y": 2})
state = {"x": 8}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 8, "y": 5})
def test_evaluate_list(self):
code = "test_list = [x, add_two(x)]"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
self.assertListEqual(result, [3, 5])
self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
def test_evaluate_name(self):
code = "y = x"
state = {"x": 3}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3, "y": 3})
def test_evaluate_subscript(self):
code = "test_list = [x, add_two(x)]\ntest_list[1]"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
def test_evaluate_for(self):
code = "x = 0\nfor i in range(3):\n x = i"
state = {}
result = evaluate(code, {"range": range}, state=state)
assert result == 2
self.assertDictEqual(state, {"x": 2, "i": 2})
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_agent_types.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
directory = tempfile.mkdtemp()
return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
def test_from_tensor(self):
tensor = torch.rand(12, dtype=torch.float64) - 0.5
agent_type = AgentAudio(tensor)
path = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(path))
# Ensure that the file contains the same value as the original tensor
new_tensor, _ = sf.read(path)
self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
def test_from_string(self):
tensor = torch.rand(12, dtype=torch.float64) - 0.5
path = get_new_path(suffix=".wav")
sf.write(path, tensor, 16000)
agent_type = AgentAudio(path)
self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
def test_from_tensor(self):
tensor = torch.randint(0, 256, (64, 64, 3))
agent_type = AgentImage(tensor)
path = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
self.assertIsInstance(agent_type.to_raw(), Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path))
def test_from_string(self):
path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
image = Image.open(path)
agent_type = AgentImage(path)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path))
def test_from_image(self):
path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
image = Image.open(path)
agent_type = AgentImage(image)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
def test_from_string(self):
string = "Hey!"
agent_type = AgentText(string)
self.assertEqual(string, agent_type.to_string())
self.assertEqual(string, agent_type.to_raw())
self.assertEqual(string, agent_type)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_speech_to_text.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available, load_tool
from .test_tools_common import ToolTesterMixin
if is_torch_available():
import torch
class SpeechToTextToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("speech-to-text")
self.tool.setup()
def test_exact_match_arg(self):
result = self.tool(torch.ones(3000))
self.assertEqual(result, " you")
def test_exact_match_kwarg(self):
result = self.tool(audio=torch.ones(3000))
self.assertEqual(result, " you")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_text_classification.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("text-classification")
self.tool.setup()
self.remote_tool = load_tool("text-classification", remote=True)
def test_exact_match_arg(self):
result = self.tool("That's quite cool", ["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_arg_remote(self):
result = self.remote_tool("That's quite cool", ["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_kwarg(self):
result = self.tool(text="That's quite cool", labels=["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
self.assertEqual(result, "positive")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_image_segmentation.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir
from .test_tools_common import ToolTesterMixin
if is_vision_available():
from PIL import Image
class ImageSegmentationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("image-segmentation")
self.tool.setup()
self.remote_tool = load_tool("image-segmentation", remote=True)
def test_exact_match_arg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_arg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_tools_common.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
)
elif input_type == "audio":
inputs.append(torch.ones(3000))
elif isinstance(input_type, list):
inputs.append(create_inputs(input_type))
else:
raise ValueError(f"Invalid type requested: {input_type}")
return inputs
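# for illustration: create_inputs(["text", "image"]) returns ["Text input", <512x512 PIL image>],
# and an "audio" entry would be the placeholder tensor torch.ones(3000)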
def output_types(outputs: List):
output_types = []
for output in outputs:
if isinstance(output, (str, AgentText)):
output_types.append("text")
elif isinstance(output, (Image.Image, AgentImage)):
output_types.append("image")
elif isinstance(output, (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(f"Invalid output: {output}")
return output_types
@is_tool_test
class ToolTesterMixin:
def test_inputs_outputs(self):
self.assertTrue(hasattr(self.tool, "inputs"))
self.assertTrue(hasattr(self.tool, "outputs"))
inputs = self.tool.inputs
for _input in inputs:
if isinstance(_input, list):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def test_call(self):
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
# There is a single output
if len(self.tool.outputs) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs), self.tool.outputs)
def test_common_attributes(self):
self.assertTrue(hasattr(self.tool, "description"))
self.assertTrue(hasattr(self.tool, "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def test_agent_types_outputs(self):
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
for output, output_type in zip(outputs, self.tool.outputs):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output, agent_type))
def test_agent_types_inputs(self):
inputs = create_inputs(self.tool.inputs)
_inputs = []
for _input, input_type in zip(inputs, self.tool.inputs):
if isinstance(input_type, list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_image_question_answering.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir
from .test_tools_common import ToolTesterMixin
if is_vision_available():
from PIL import Image
class ImageQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("image-question-answering")
self.tool.setup()
self.remote_tool = load_tool("image-question-answering", remote=True)
def test_exact_match_arg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image, "How many cats are sleeping on the couch?")
self.assertEqual(result, "2")
def test_exact_match_arg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image, "How many cats are sleeping on the couch?")
self.assertEqual(result, "2")
def test_exact_match_kwarg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image=image, question="How many cats are sleeping on the couch?")
self.assertEqual(result, "2")
def test_exact_match_kwarg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image=image, question="How many cats are sleeping on the couch?")
self.assertEqual(result, "2")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_text_question_answering.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("text-question-answering")
self.tool.setup()
self.remote_tool = load_tool("text-question-answering", remote=True)
def test_exact_match_arg(self):
result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
self.assertEqual(result, "launched the BigScience Research Workshop")
def test_exact_match_arg_remote(self):
result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
self.assertEqual(result, "launched the BigScience Research Workshop")
def test_exact_match_kwarg(self):
result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
self.assertEqual(result, "launched the BigScience Research Workshop")
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
self.assertEqual(result, "launched the BigScience Research Workshop")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_translation.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from transformers.tools.agent_types import AGENT_TYPE_MAPPING
from .test_tools_common import ToolTesterMixin, output_types
class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("translation")
self.tool.setup()
self.remote_tool = load_tool("translation", remote=True)
def test_exact_match_arg(self):
result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_arg_remote(self):
result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_kwarg(self):
result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_call(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
outputs = self.tool(*inputs)
# There is a single output
if len(self.tool.outputs) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs), self.tool.outputs)
def test_agent_types_outputs(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
for output, output_type in zip(outputs, self.tool.outputs):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output, agent_type))
def test_agent_types_inputs(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
_inputs = []
for _input, input_type in zip(inputs, self.tool.inputs):
if isinstance(input_type, list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_text_summarization.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextSummarizationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("summarization")
self.tool.setup()
self.remote_tool = load_tool("summarization", remote=True)
def test_exact_match_arg(self):
result = self.tool(TEXT)
self.assertEqual(
result,
"Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
)
def test_exact_match_arg_remote(self):
result = self.remote_tool(TEXT)
self.assertEqual(
result,
"Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
)
def test_exact_match_kwarg(self):
result = self.tool(text=TEXT)
self.assertEqual(
result,
"Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
)
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text=TEXT)
self.assertEqual(
result,
"Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/tools/test_document_question_answering.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class DocumentQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("document-question-answering")
self.tool.setup()
self.remote_tool = load_tool("document-question-answering", remote=True)
def test_exact_match_arg(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.tool(document, "When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
def test_exact_match_arg_remote(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.remote_tool(document, "When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
def test_exact_match_kwarg(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
self.tool(document=document, question="When is the coffee break?")
def test_exact_match_kwarg_remote(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.remote_tool(document=document, question="When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/peft_integration/test_peft_integration.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, OPTForCausalLM
from transformers.testing_utils import require_peft, require_torch, require_torch_gpu, slow, torch_device
from transformers.utils import is_torch_available
if is_torch_available():
import torch
@require_peft
@require_torch
class PeftTesterMixin:
peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",)
transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",)
transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM)
# TODO: run it with CI after PEFT release.
@slow
class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin):
"""
A testing suite that makes sure that the PeftModel class is correctly integrated into the transformers library.
"""
def _check_lora_correctly_converted(self, model):
"""
Utility method to check if the model has correctly adapters injected on it.
"""
from peft.tuners.tuners_utils import BaseTunerLayer
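        # PEFT injects adapters by wrapping the targeted modules in `BaseTunerLayer`
        # subclasses (e.g. LoRA linear layers), so finding a single such module is
        # enough evidence that the conversion happened.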
is_peft_loaded = False
for _, m in model.named_modules():
if isinstance(m, BaseTunerLayer):
is_peft_loaded = True
break
return is_peft_loaded
def test_peft_from_pretrained(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained`.
        This checks that, when we pass a remote folder that contains an adapter config and adapter
        weights, the model is correctly loaded with the adapters injected into it.
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
self.assertTrue(peft_model._hf_peft_config_loaded)
# dummy generation
_ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
def test_peft_state_dict(self):
"""
Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains
the expected keys.
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
state_dict = peft_model.get_adapter_state_dict()
for key in state_dict.keys():
self.assertTrue("lora" in key)
def test_peft_save_pretrained(self):
"""
Test that checks various combinations of `save_pretrained` with a model that has adapters loaded
on it. This checks if the saved model contains the expected files (adapter weights and adapter config).
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname)
self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("config.json" not in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
peft_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(peft_model))
def test_peft_enable_disable_adapters(self):
"""
A test that checks if `enable_adapters` and `disable_adapters` methods work as expected.
"""
from peft import LoraConfig
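        # `init_lora_weights=False` initializes the LoRA matrices with non-zero values,
        # so toggling the adapter should visibly change the logits: enabling it must
        # reproduce the adapted forward pass, disabling it must not.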
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
peft_model.add_adapter(peft_config)
peft_logits = peft_model(dummy_input).logits
peft_model.disable_adapters()
peft_logits_disabled = peft_model(dummy_input).logits
peft_model.enable_adapters()
peft_logits_enabled = peft_model(dummy_input).logits
self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12))
self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12))
def test_peft_add_adapter(self):
"""
Simple test that tests if `add_adapter` works as expected
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
# dummy generation
_ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
def test_peft_add_adapter_from_pretrained(self):
"""
Simple test that tests if `add_adapter` works as expected
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device)
self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained))
def test_peft_add_adapter_modules_to_save(self):
"""
Simple test that tests if `add_adapter` works as expected when training with
modules to save.
"""
from peft import LoraConfig
from peft.utils import ModulesToSaveWrapper
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"])
model.add_adapter(peft_config)
self._check_lora_correctly_converted(model)
_has_modules_to_save_wrapper = False
for name, module in model.named_modules():
if isinstance(module, ModulesToSaveWrapper):
_has_modules_to_save_wrapper = True
self.assertTrue(module.modules_to_save.default.weight.requires_grad)
self.assertTrue("lm_head" in name)
break
self.assertTrue(_has_modules_to_save_wrapper)
state_dict = model.get_adapter_state_dict()
self.assertTrue("lm_head.weight" in state_dict.keys())
logits = model(dummy_input).logits
loss = logits.mean()
loss.backward()
for _, param in model.named_parameters():
if param.requires_grad:
self.assertTrue(param.grad is not None)
def test_peft_add_adapter_training_gradient_checkpointing(self):
"""
Simple test that tests if `add_adapter` works as expected when training with
gradient checkpointing.
"""
from peft import LoraConfig
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
self.assertTrue(self._check_lora_correctly_converted(model))
# When attaching adapters the input embeddings will stay frozen, this will
# lead to the output embedding having requires_grad=False.
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
frozen_output = model.get_input_embeddings()(dummy_input)
self.assertTrue(frozen_output.requires_grad is False)
model.gradient_checkpointing_enable()
                # Since we attached the hook here, the output of the input embeddings
                # should now have requires_grad set properly
non_frozen_output = model.get_input_embeddings()(dummy_input)
self.assertTrue(non_frozen_output.requires_grad is True)
# To repro the Trainer issue
dummy_input.requires_grad = False
for name, param in model.named_parameters():
if "lora" in name.lower():
self.assertTrue(param.requires_grad)
logits = model(dummy_input).logits
loss = logits.mean()
loss.backward()
for name, param in model.named_parameters():
if param.requires_grad:
self.assertTrue("lora" in name.lower())
self.assertTrue(param.grad is not None)
def test_peft_add_multi_adapter(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if
add_adapter works as expected in multi-adapter setting.
"""
from peft import LoraConfig
from peft.tuners.tuners_utils import BaseTunerLayer
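        # The first adapter added without an explicit name becomes "default";
        # `set_adapter` accepts either a single adapter name or a list of names, and
        # saving a model while several adapters are active is expected to fail.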
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
for model_id in self.transformers_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
is_peft_loaded = False
model = transformers_class.from_pretrained(model_id).to(torch_device)
logits_original_model = model(dummy_input).logits
peft_config = LoraConfig(init_lora_weights=False)
model.add_adapter(peft_config)
logits_adapter_1 = model(dummy_input)
model.add_adapter(peft_config, adapter_name="adapter-2")
logits_adapter_2 = model(dummy_input)
for _, m in model.named_modules():
if isinstance(m, BaseTunerLayer):
is_peft_loaded = True
break
self.assertTrue(is_peft_loaded)
# dummy generation
_ = model.generate(input_ids=dummy_input)
model.set_adapter("default")
self.assertTrue(model.active_adapters() == ["default"])
self.assertTrue(model.active_adapter() == "default")
model.set_adapter("adapter-2")
self.assertTrue(model.active_adapters() == ["adapter-2"])
self.assertTrue(model.active_adapter() == "adapter-2")
# Logits comparison
self.assertFalse(
torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)
)
self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6))
model.set_adapter(["adapter-2", "default"])
self.assertTrue(model.active_adapters() == ["adapter-2", "default"])
self.assertTrue(model.active_adapter() == "adapter-2")
logits_adapter_mixed = model(dummy_input)
self.assertFalse(
torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6)
)
self.assertFalse(
torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6)
)
# multi active adapter saving not supported
with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
@require_torch_gpu
def test_peft_from_pretrained_kwargs(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs
        and see if the integration behaves as expected.
"""
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
self.assertTrue(peft_model.hf_device_map is not None)
# dummy generation
_ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device))
@require_torch_gpu
def test_peft_save_quantized(self):
"""
Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models
"""
# 4bit
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear4bit")
self.assertTrue(peft_model.hf_device_map is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname)
self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
# 8-bit
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
self.assertTrue(peft_model.hf_device_map is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname)
self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
@require_torch_gpu
def test_peft_save_quantized_regression(self):
"""
Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models
Regression test to make sure everything works as expected before the safetensors integration.
"""
# 4bit
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear4bit")
self.assertTrue(peft_model.hf_device_map is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
# 8-bit
for model_id in self.peft_test_model_ids:
for transformers_class in self.transformers_test_model_classes:
peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto")
module = peft_model.model.decoder.layers[0].self_attn.v_proj
self.assertTrue(module.__class__.__name__ == "Linear8bitLt")
self.assertTrue(peft_model.hf_device_map is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
peft_model.save_pretrained(tmpdirname, safe_serialization=False)
self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname))
self.assertTrue("adapter_config.json" in os.listdir(tmpdirname))
self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname))
self.assertTrue("model.safetensors" not in os.listdir(tmpdirname))
def test_peft_pipeline(self):
"""
Simple test that tests the basic usage of PEFT model + pipeline
"""
from transformers import pipeline
for model_id in self.peft_test_model_ids:
pipe = pipeline("text-generation", model_id)
_ = pipe("Hello")
def test_peft_add_adapter_with_state_dict(self):
"""
Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if
add_adapter works as expected with a state_dict being passed.
"""
from peft import LoraConfig
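        # `load_adapter` needs either a `peft_model_id` to download from, or an explicit
        # `adapter_state_dict` together with a `peft_config`; passing neither (or a state
        # dict without a config) should raise a ValueError.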
dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)
for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids):
for transformers_class in self.transformers_test_model_classes:
model = transformers_class.from_pretrained(model_id).to(torch_device)
peft_config = LoraConfig(init_lora_weights=False)
with self.assertRaises(ValueError):
model.load_adapter(peft_model_id=None)
state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin")
dummy_state_dict = torch.load(state_dict_path)
model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config)
with self.assertRaises(ValueError):
model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None))
self.assertTrue(self._check_lora_correctly_converted(model))
# dummy generation
_ = model.generate(input_ids=dummy_input)
def test_peft_from_pretrained_hub_kwargs(self):
"""
Tests different combinations of PEFT model + from_pretrained + hub kwargs
"""
peft_model_id = "peft-internal-testing/tiny-opt-lora-revision"
# This should not work
with self.assertRaises(OSError):
_ = AutoModelForCausalLM.from_pretrained(peft_model_id)
adapter_kwargs = {"revision": "test"}
# This should work
model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
self.assertTrue(self._check_lora_correctly_converted(model))
model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
self.assertTrue(self._check_lora_correctly_converted(model))
adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"}
model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
self.assertTrue(self._check_lora_correctly_converted(model))
model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs)
self.assertTrue(self._check_lora_correctly_converted(model))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/optimization/test_optimization_tf.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def testGradientAccumulator(self):
accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])
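        # The accumulator sums gradients elementwise across calls:
        # [1, 2] + [-2, 1] + [-1, 2] == [-2, 5], which is checked below.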
with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step, 3)
self.assertEqual(len(accumulator.gradients), 1)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step, 0)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
def testGradientAccumulatorDistributionStrategy(self):
context._context = None
ops.enable_eager_execution_internal()
physical_devices = tf.config.list_physical_devices("CPU")
if len(physical_devices) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
)
devices = tf.config.list_logical_devices(device_type="CPU")
strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
with strategy.scope():
accumulator = GradientAccumulator()
variable = tf.Variable([4.0, 3.0])
optimizer, _ = create_optimizer(5e-5, 10, 5)
gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
def accumulate_on_replica(gradient):
accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
@tf.function
def accumulate(grad1, grad2):
with strategy.scope():
local_variables = strategy.experimental_local_results(gradient_placeholder)
local_variables[0].assign(grad1)
local_variables[1].assign(grad2)
strategy.run(accumulate_on_replica, args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(apply_on_replica)
def _check_local_values(grad1, grad2):
values = strategy.experimental_local_results(accumulator._gradients[0])
self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0], [-1.0, 1.0])
accumulate([3.0, -1.0], [-1.0, -1.0])
accumulate([-2.0, 2.0], [3.0, -2.0])
self.assertEqual(accumulator.step, 3)
_check_local_values([2.0, 3.0], [1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step, 0)
_check_local_values([0.0, 0.0], [0.0, 0.0])
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/optimization/test_optimization.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
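# The two helpers below step a scheduler `num_steps` times and record the learning
# rate at every step; the second one additionally round-trips the scheduler through
# `state_dict()` / `load_state_dict()` halfway through to exercise save and reload.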
def unwrap_schedule(scheduler, num_steps=10):
lrs = []
for _ in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
lrs = []
for step in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, "schedule.bin")
torch.save(scheduler.state_dict(), file_name)
state_dict = torch.load(file_name)
scheduler.load_state_dict(state_dict)
return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_adam_w(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
for _ in range(100):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
def test_adafactor(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = Adafactor(
params=[w],
lr=1e-2,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
relative_step=False,
scale_parameter=False,
warmup_init=False,
)
for _ in range(1000):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
m = nn.Linear(50, 50) if is_torch_available() else None
optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
num_steps = 10
def assertListAlmostEqual(self, list1, list2, tol, msg=None):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol, msg=msg)
def test_schedulers(self):
common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
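        # The optimizer above is created with lr=10.0; the schedules taking `common_kwargs`
        # warm up over 2 steps, hence the 0.0 -> 5.0 -> 10.0 ramp at the start of their
        # expected learning-rate lists before the decay kicks in.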
scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
kwargs, expected_learning_rates = data
scheduler = scheduler_func(self.optimizer, **kwargs)
self.assertEqual(len([scheduler.get_lr()[0]]), 1)
lrs_1 = unwrap_schedule(scheduler, self.num_steps)
self.assertListAlmostEqual(
lrs_1,
expected_learning_rates,
tol=1e-2,
msg=f"failed for {scheduler_func} in normal scheduler",
)
scheduler = scheduler_func(self.optimizer, **kwargs)
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(scheduler) # wrap to test picklability of the schedule
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
"""See https://github.com/huggingface/transformers/issues/21689"""
def __init__(self, fn):
self.fn = fn
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_object_detection.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
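# When vision is unavailable, the dummy `Image.open` above only keeps the module importable;
# the tests themselves are skipped through the `require_vision` decorator.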
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def run_pipeline_test(self, object_detector, examples):
outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
self.assertGreater(len(outputs), 0)
for detected_object in outputs:
self.assertEqual(
detected_object,
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
},
)
import datasets
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
batch = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
batch_outputs = object_detector(batch, threshold=0.0)
self.assertEqual(len(batch), len(batch_outputs))
for outputs in batch_outputs:
self.assertGreater(len(outputs), 0)
for detected_object in outputs:
self.assertEqual(
detected_object,
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
},
)
@require_tf
@unittest.skip("Object detection not implemented in TF")
def test_small_model_tf(self):
pass
@require_torch
def test_small_model_pt(self):
model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
model = AutoModelForObjectDetection.from_pretrained(model_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
threshold=0.0,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
],
)
@require_torch
@slow
def test_large_model_pt(self):
model_id = "facebook/detr-resnet-50"
model = AutoModelForObjectDetection.from_pretrained(model_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
],
)
@require_torch
@slow
def test_integration_torch_object_detection(self):
model_id = "facebook/detr-resnet-50"
object_detector = pipeline("object-detection", model=model_id)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
],
)
@require_torch
@slow
def test_threshold(self):
threshold = 0.9985
model_id = "facebook/detr-resnet-50"
object_detector = pipeline("object-detection", model=model_id)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
@require_torch
@require_pytesseract
@slow
def test_layoutlm(self):
model_id = "Narsil/layoutlmv3-finetuned-funsd"
threshold = 0.9993
object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
outputs = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
],
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_common.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import os
import sys
import tempfile
import unittest
from pathlib import Path
import datasets
import numpy as np
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DistilBertForSequenceClassification,
TextClassificationPipeline,
TFAutoModelForSequenceClassification,
pipeline,
)
from transformers.pipelines import PIPELINE_REGISTRY, get_task
from transformers.pipelines.base import Pipeline, _pad
from transformers.testing_utils import (
TOKEN,
USER,
CaptureLogger,
RequestCounter,
backend_empty_cache,
is_pipeline_test,
is_staging_test,
nested_simplify,
require_tensorflow_probability,
require_tf,
require_torch,
require_torch_accelerator,
require_torch_or_tf,
slow,
torch_device,
)
from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available
from transformers.utils import logging as transformers_logging
sys.path.append(str(Path(__file__).parent.parent.parent / "utils"))
from test_module.custom_pipeline import PairClassificationPipeline # noqa E402
logger = logging.getLogger(__name__)
PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers")
# Dynamically import the Transformers module to grab the attribute classes of the processor from their names.
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS)
class ANY:
def __init__(self, *_types):
self._types = _types
def __eq__(self, other):
return isinstance(other, self._types)
def __repr__(self):
return f"ANY({', '.join(_type.__name__ for _type in self._types)})"
@is_pipeline_test
class CommonPipelineTest(unittest.TestCase):
@require_torch
def test_pipeline_iteration(self):
from torch.utils.data import Dataset
class MyDataset(Dataset):
data = [
"This is a test",
"This restaurant is great",
"This restaurant is awful",
]
def __len__(self):
return 3
def __getitem__(self, i):
return self.data[i]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
dataset = MyDataset()
for output in text_classifier(dataset):
self.assertEqual(output, {"label": ANY(str), "score": ANY(float)})
@require_torch
def test_check_task_auto_inference(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertIsInstance(pipe, TextClassificationPipeline)
@require_torch
def test_pipeline_batch_size_global(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertEqual(pipe._batch_size, None)
self.assertEqual(pipe._num_workers, None)
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1)
self.assertEqual(pipe._batch_size, 2)
self.assertEqual(pipe._num_workers, 1)
@require_torch
def test_pipeline_pathlike(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
with tempfile.TemporaryDirectory() as d:
pipe.save_pretrained(d)
path = Path(d)
newpipe = pipeline(task="text-classification", model=path)
self.assertIsInstance(newpipe, TextClassificationPipeline)
@require_torch
def test_pipeline_override(self):
class MyPipeline(TextClassificationPipeline):
pass
text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline)
self.assertIsInstance(text_classifier, MyPipeline)
def test_check_task(self):
task = get_task("gpt2")
self.assertEqual(task, "text-generation")
with self.assertRaises(RuntimeError):
# Wrong framework
get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best")
@require_torch
def test_iterator_data(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
# When using multiple workers on streamable data it should still work
# This will force using `num_workers=1` with a warning for now.
results = []
for out in pipe(data(10), num_workers=2):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_tf
def test_iterator_data_tf(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf")
out = pipe("This is a test")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_torch
def test_unbatch_attentions_hidden_states(self):
model = DistilBertForSequenceClassification.from_pretrained(
"hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert")
text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
# Used to throw an error because `hidden_states` are a tuple of tensors
# instead of the expected tensor.
outputs = text_classifier(["This is great !"] * 20, batch_size=32)
self.assertEqual(len(outputs), 20)
@is_pipeline_test
class PipelineScikitCompatTest(unittest.TestCase):
@require_torch
def test_pipeline_predict_pt(self):
data = ["This is a test"]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
expected_output = [{"label": ANY(str), "score": ANY(float)}]
actual_output = text_classifier.predict(data)
self.assertEqual(expected_output, actual_output)
@require_tf
def test_pipeline_predict_tf(self):
data = ["This is a test"]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
expected_output = [{"label": ANY(str), "score": ANY(float)}]
actual_output = text_classifier.predict(data)
self.assertEqual(expected_output, actual_output)
@require_torch
def test_pipeline_transform_pt(self):
data = ["This is a test"]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
expected_output = [{"label": ANY(str), "score": ANY(float)}]
actual_output = text_classifier.transform(data)
self.assertEqual(expected_output, actual_output)
@require_tf
def test_pipeline_transform_tf(self):
data = ["This is a test"]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
expected_output = [{"label": ANY(str), "score": ANY(float)}]
actual_output = text_classifier.transform(data)
self.assertEqual(expected_output, actual_output)
@is_pipeline_test
class PipelinePadTest(unittest.TestCase):
@require_torch
def test_pipeline_padding(self):
import torch
items = [
{
"label": "label1",
"input_ids": torch.LongTensor([[1, 23, 24, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 0]]),
},
{
"label": "label2",
"input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]),
},
]
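        # `_pad` gathers one key across all items and pads tensors to the longest
        # sequence in the batch with the given pad value on the requested side;
        # non-tensor values (like the labels) are simply collected into a list.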
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "right"),
torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "left"),
torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]])
)
)
@require_torch
def test_pipeline_image_padding(self):
import torch
items = [
{
"label": "label1",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
{
"label": "label2",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
]
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
torch.allclose(
_pad(items, "pixel_values", 10, "right"),
torch.zeros((2, 3, 10, 10)),
)
)
@require_torch
def test_pipeline_offset_mapping(self):
import torch
items = [
{
"offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long),
},
{
"offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long),
},
]
self.assertTrue(
torch.allclose(
_pad(items, "offset_mappings", 0, "right"),
torch.zeros((2, 11, 2), dtype=torch.long),
),
)
@is_pipeline_test
class PipelineUtilsTest(unittest.TestCase):
@require_torch
def test_pipeline_dataset(self):
from transformers.pipelines.pt_utils import PipelineDataset
dummy_dataset = [0, 1, 2, 3]
def add(number, extra=0):
return number + extra
dataset = PipelineDataset(dummy_dataset, add, {"extra": 2})
self.assertEqual(len(dataset), 4)
outputs = [dataset[i] for i in range(4)]
self.assertEqual(outputs, [2, 3, 4, 5])
@require_torch
def test_pipeline_iterator(self):
from transformers.pipelines.pt_utils import PipelineIterator
dummy_dataset = [0, 1, 2, 3]
def add(number, extra=0):
return number + extra
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2})
self.assertEqual(len(dataset), 4)
outputs = list(dataset)
self.assertEqual(outputs, [2, 3, 4, 5])
@require_torch
def test_pipeline_iterator_no_len(self):
from transformers.pipelines.pt_utils import PipelineIterator
def dummy_dataset():
for i in range(4):
yield i
def add(number, extra=0):
return number + extra
dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2})
with self.assertRaises(TypeError):
len(dataset)
outputs = list(dataset)
self.assertEqual(outputs, [2, 3, 4, 5])
@require_torch
def test_pipeline_batch_unbatch_iterator(self):
from transformers.pipelines.pt_utils import PipelineIterator
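        # With `loader_batch_size=3`, batched outputs coming from `add` are un-batched
        # back into individual items, so the four ids below are recovered one at a time.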
dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]]}
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = list(dataset)
self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}])
@require_torch
def test_pipeline_batch_unbatch_iterator_tensors(self):
import torch
from transformers.pipelines.pt_utils import PipelineIterator
dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}]
def add(number, extra=0):
return {"id": number["id"] + extra}
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = list(dataset)
self.assertEqual(
nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}]
)
@require_torch
def test_pipeline_chunk_iterator(self):
from transformers.pipelines.pt_utils import PipelineChunkIterator
def preprocess_chunk(n: int):
for i in range(n):
yield i
dataset = [2, 3]
dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3)
outputs = list(dataset)
self.assertEqual(outputs, [0, 1, 0, 1, 2])
@require_torch
def test_pipeline_pack_iterator(self):
from transformers.pipelines.pt_utils import PipelinePackIterator
def pack(item):
return {"id": item["id"] + 1, "is_last": item["is_last"]}
dataset = [
{"id": 0, "is_last": False},
{"id": 1, "is_last": True},
{"id": 0, "is_last": False},
{"id": 1, "is_last": False},
{"id": 2, "is_last": True},
]
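        # `is_last` marks the end of a group: the five items above are packed into two
        # lists, one per sequence terminated by `is_last=True`.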
dataset = PipelinePackIterator(dataset, pack, {})
outputs = list(dataset)
self.assertEqual(
outputs,
[
[
{"id": 1},
{"id": 2},
],
[
{"id": 1},
{"id": 2},
{"id": 3},
],
],
)
@require_torch
def test_pipeline_pack_unbatch_iterator(self):
from transformers.pipelines.pt_utils import PipelinePackIterator
dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]}
dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = list(dataset)
self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]])
        # `is_last` stays False across the first loader batch boundary
dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]}
dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = list(dataset)
self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]])
def test_pipeline_negative_device(self):
# To avoid regressing, pipeline used to accept device=-1
classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1)
expected_output = [{"generated_text": ANY(str)}]
actual_output = classifier("Test input.")
self.assertEqual(expected_output, actual_output)
@slow
@require_torch
def test_load_default_pipelines_pt(self):
import torch
from transformers.pipelines import SUPPORTED_TASKS
set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731
for task in SUPPORTED_TASKS.keys():
if task == "table-question-answering":
                # test table in separate test due to more dependencies
continue
self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt)
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
@slow
@require_tf
def test_load_default_pipelines_tf(self):
import tensorflow as tf
from transformers.pipelines import SUPPORTED_TASKS
set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731
for task in SUPPORTED_TASKS.keys():
if task == "table-question-answering":
                # test table in separate test due to more dependencies
continue
self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf)
        # clean up as much GPU memory as possible
gc.collect()
@slow
@require_torch
def test_load_default_pipelines_pt_table_qa(self):
import torch
set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731
self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt)
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
@slow
@require_torch
@require_torch_accelerator
def test_pipeline_accelerator(self):
pipe = pipeline("text-generation", device=torch_device)
_ = pipe("Hello")
@slow
@require_torch
@require_torch_accelerator
def test_pipeline_accelerator_indexed(self):
pipe = pipeline("text-generation", device=torch_device)
_ = pipe("Hello")
@slow
@require_tf
@require_tensorflow_probability
def test_load_default_pipelines_tf_table_qa(self):
import tensorflow as tf
set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731
self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf)
        # clean up as much GPU memory as possible
gc.collect()
def check_default_pipeline(self, task, framework, set_seed_fn, check_models_equal_fn):
from transformers.pipelines import SUPPORTED_TASKS, pipeline
task_dict = SUPPORTED_TASKS[task]
# test to compare pipeline to manually loading the respective model
model = None
relevant_auto_classes = task_dict[framework]
if len(relevant_auto_classes) == 0:
# task has no default
logger.debug(f"{task} in {framework} has no default")
return
# by default use first class
auto_model_cls = relevant_auto_classes[0]
# retrieve correct model ids
if task == "translation":
# special case for translation pipeline which has multiple languages
model_ids = []
revisions = []
tasks = []
for translation_pair in task_dict["default"].keys():
model_id, revision = task_dict["default"][translation_pair]["model"][framework]
model_ids.append(model_id)
revisions.append(revision)
tasks.append(task + f"_{'_to_'.join(translation_pair)}")
else:
# normal case - non-translation pipeline
model_id, revision = task_dict["default"]["model"][framework]
model_ids = [model_id]
revisions = [revision]
tasks = [task]
# check for equality
for model_id, revision, task in zip(model_ids, revisions, tasks):
# load default model
try:
set_seed_fn()
model = auto_model_cls.from_pretrained(model_id, revision=revision)
except ValueError:
                # the first auto class is possibly not compatible with the model, go to the next model class
auto_model_cls = relevant_auto_classes[1]
set_seed_fn()
model = auto_model_cls.from_pretrained(model_id, revision=revision)
# load default pipeline
set_seed_fn()
default_pipeline = pipeline(task, framework=framework)
# compare pipeline model with default model
models_are_equal = check_models_equal_fn(default_pipeline.model, model)
self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.")
logger.debug(f"{task} in {framework} succeeded with {model_id}.")
def check_models_equal_pt(self, model1, model2):
models_are_equal = True
for model1_p, model2_p in zip(model1.parameters(), model2.parameters()):
if model1_p.data.ne(model2_p.data).sum() > 0:
models_are_equal = False
return models_are_equal
def check_models_equal_tf(self, model1, model2):
models_are_equal = True
for model1_p, model2_p in zip(model1.weights, model2.weights):
if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5:
models_are_equal = False
return models_are_equal
class CustomPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, maybe_arg=2):
input_ids = self.tokenizer(text, return_tensors="pt")
return input_ids
def _forward(self, model_inputs):
outputs = self.model(**model_inputs)
return outputs
def postprocess(self, model_outputs):
return model_outputs["logits"].softmax(-1).numpy()
@is_pipeline_test
class CustomPipelineTest(unittest.TestCase):
def test_warning_logs(self):
transformers_logging.set_verbosity_debug()
logger_ = transformers_logging.get_logger("transformers.pipelines.base")
alias = "text-classification"
# Get the original task, so we can restore it at the end.
        # (otherwise the subsequent tests in `TextClassificationPipelineTests` will fail)
_, original_task, _ = PIPELINE_REGISTRY.check_task(alias)
try:
with CaptureLogger(logger_) as cm:
PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline)
self.assertIn(f"{alias} is already registered", cm.out)
finally:
# restore
PIPELINE_REGISTRY.supported_tasks[alias] = original_task
def test_register_pipeline(self):
PIPELINE_REGISTRY.register_pipeline(
"custom-text-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification if is_torch_available() else None,
tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None,
default={"pt": "hf-internal-testing/tiny-random-distilbert"},
type="text",
)
assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks()
_, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification")
self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ())
self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ())
self.assertEqual(task_def["type"], "text")
self.assertEqual(task_def["impl"], PairClassificationPipeline)
self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}})
# Clean registry for next tests.
del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"]
@require_torch_or_tf
def test_dynamic_pipeline(self):
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification if is_torch_available() else None,
tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None,
)
classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
# Clean registry as we won't need the pipeline to be in it for the rest to work.
del PIPELINE_REGISTRY.supported_tasks["pair-classification"]
with tempfile.TemporaryDirectory() as tmp_dir:
classifier.save_pretrained(tmp_dir)
# checks
self.assertDictEqual(
classifier.model.config.custom_pipelines,
{
"pair-classification": {
"impl": "custom_pipeline.PairClassificationPipeline",
"pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (),
"tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (),
}
},
)
# Fails if the user forgets to pass along `trust_remote_code=True`
with self.assertRaises(ValueError):
_ = pipeline(model=tmp_dir)
new_classifier = pipeline(model=tmp_dir, trust_remote_code=True)
# Using trust_remote_code=False forces the traditional pipeline tag
old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False)
# Can't do an isinstance check because new_classifier comes from the PairClassificationPipeline class of a
# dynamically loaded module
self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline")
self.assertEqual(new_classifier.task, "pair-classification")
results = new_classifier("I hate you", second_text="I love you")
self.assertDictEqual(
nested_simplify(results),
{"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]},
)
self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline")
self.assertEqual(old_classifier.task, "text-classification")
results = old_classifier("I hate you", text_pair="I love you")
self.assertListEqual(
nested_simplify(results),
[{"label": "LABEL_0", "score": 0.505}],
)
@require_torch_or_tf
def test_cached_pipeline_has_minimum_calls_to_head(self):
# Make sure we have cached the pipeline.
_ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
@require_torch
def test_chunk_pipeline_batching_single_file(self):
# Make sure we have cached the pipeline.
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC")
ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC")
# For some reason scoping doesn't work if not using `self.`
self.COUNT = 0
forward = pipe.model.forward
def new_forward(*args, **kwargs):
self.COUNT += 1
return forward(*args, **kwargs)
pipe.model.forward = new_forward
for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024):
pass
self.assertEqual(self.COUNT, 1)
@require_torch
@is_staging_test
class DynamicPipelineTester(unittest.TestCase):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"]
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id="test-dynamic-pipeline")
except HTTPError:
pass
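# Registers a custom pipeline, pushes it to the Hub, and checks that reloading requires `trust_remote_code=True`
# and reproduces the local results.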
def test_push_to_hub_dynamic_pipeline(self):
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
)
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = BertForSequenceClassification(config).eval()
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-pipeline", token=self._token)
repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-pipeline", token=self._token)
vocab_file = os.path.join(tmp_dir, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
tokenizer = BertTokenizer(vocab_file)
classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer)
# Clean registry as we won't need the pipeline to be in it for the rest to work.
del PIPELINE_REGISTRY.supported_tasks["pair-classification"]
classifier.save_pretrained(tmp_dir)
# checks
self.assertDictEqual(
classifier.model.config.custom_pipelines,
{
"pair-classification": {
"impl": "custom_pipeline.PairClassificationPipeline",
"pt": ("AutoModelForSequenceClassification",),
"tf": (),
}
},
)
repo.push_to_hub()
# Fails if the user forgets to pass along `trust_remote_code=True`
with self.assertRaises(ValueError):
_ = pipeline(model=f"{USER}/test-dynamic-pipeline")
new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True)
# Can't do an isinstance check because new_classifier comes from the PairClassificationPipeline class of a
# dynamically loaded module
self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline")
results = classifier("I hate you", second_text="I love you")
new_results = new_classifier("I hate you", second_text="I love you")
self.assertDictEqual(nested_simplify(results), nested_simplify(new_results))
# Using trust_remote_code=False forces the traditional pipeline tag
old_classifier = pipeline(
"text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False
)
self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline")
self.assertEqual(old_classifier.task, "text-classification")
new_results = old_classifier("I hate you", text_pair="I love you")
self.assertListEqual(
nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results)
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_image.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_IMAGE_TO_IMAGE_MAPPING,
AutoImageProcessor,
AutoModelForImageToImage,
ImageToImagePipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_torch,
require_vision,
slow,
)
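# Fallback stub so the module-level `examples` below can still be declared when Pillow is not installed.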
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_torch
@require_vision
class ImageToImagePipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING
examples = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
@require_torch
@require_vision
@slow
def test_pipeline(self):
model_id = "caidas/swin2SR-classical-sr-x2-64"
upscaler = pipeline("image-to-image", model=model_id)
upscaled_list = upscaler(self.examples)
self.assertEqual(len(upscaled_list), len(self.examples))
for output in upscaled_list:
self.assertIsInstance(output, Image.Image)
self.assertEqual(upscaled_list[0].size, (1296, 976))
self.assertEqual(upscaled_list[1].size, (1296, 976))
@require_torch
@require_vision
@slow
def test_pipeline_model_processor(self):
model_id = "caidas/swin2SR-classical-sr-x2-64"
model = AutoModelForImageToImage.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
upscaler = ImageToImagePipeline(model=model, image_processor=image_processor)
upscaled_list = upscaler(self.examples)
self.assertEqual(len(upscaled_list), len(self.examples))
for output in upscaled_list:
self.assertIsInstance(output, Image.Image)
self.assertEqual(upscaled_list[0].size, (1296, 976))
self.assertEqual(upscaled_list[1].size, (1296, 976))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_summarization.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
SummarizationPipeline,
TFPreTrainedModel,
pipeline,
)
from transformers.testing_utils import get_gpu_count, is_pipeline_test, require_tf, require_torch, slow, torch_device
from transformers.tokenization_utils import TruncationStrategy
from .test_pipelines_common import ANY
@is_pipeline_test
class SummarizationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer)
return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"]
def run_pipeline_test(self, summarizer, _):
model = summarizer.model
outputs = summarizer("(CNN)The Palestinian Authority officially became")
self.assertEqual(outputs, [{"summary_text": ANY(str)}])
outputs = summarizer(
"(CNN)The Palestinian Authority officially became ",
num_beams=2,
min_length=2,
max_length=5,
)
self.assertEqual(outputs, [{"summary_text": ANY(str)}])
# Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences.
model_can_handle_longer_seq = [
"SwitchTransformersConfig",
"T5Config",
"LongT5Config",
"LEDConfig",
"PegasusXConfig",
"FSMTConfig",
"M2M100Config",
"ProphetNetConfig", # positional embeddings up to a fixed maximum size (otherwise clamping the values)
]
if model.config.__class__.__name__ not in model_can_handle_longer_seq:
# Too long and exception is expected.
# For TF models, if the weights are initialized in a GPU context, we won't get the expected index error from
# the embedding layer.
if not (
isinstance(model, TFPreTrainedModel)
and get_gpu_count() > 0
and len(summarizer.model.trainable_weights) > 0
):
with self.assertRaises(Exception):
outputs = summarizer("This " * 1000)
outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST)
@require_torch
def test_small_model_pt(self):
summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt")
outputs = summarizer("This is a small test")
self.assertEqual(
outputs,
[
{
"summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป"
}
],
)
@require_tf
def test_small_model_tf(self):
summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf")
outputs = summarizer("This is a small test")
self.assertEqual(
outputs,
[
{
"summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป"
}
],
)
@require_torch
@slow
def test_integration_torch_summarization(self):
summarizer = pipeline(task="summarization", device=torch_device)
cnn_article = (
" (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on"
" Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The"
" formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based."
" The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its"
' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East'
' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the'
" situation in Palestinian territories, paving the way for possible war crimes investigations against"
" Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and"
" the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the"
" body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a"
' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the'
' world is also a step closer to ending a long era of impunity and injustice," he said, according to an'
' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge'
" Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the"
' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine'
" acquires all the rights as well as responsibilities that come with being a State Party to the Statute."
' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights'
' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should'
" immediately end their pressure, and countries that support universal acceptance of the court's treaty"
' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the'
" group. \"What's objectionable is the attempts to undermine international justice, not Palestine's"
' decision to join a treaty to which over 100 countries around the world are members." In January, when'
" the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an"
' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"'
" disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a"
' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in'
' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We'
' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"'
" it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the"
' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the'
" court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou"
' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war'
" between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry"
" will include alleged war crimes committed since June. The International Criminal Court was set up in"
" 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder"
" and Faith Karimi contributed to this report."
)
expected_cnn_summary = (
" The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives"
" the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States"
" opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move,"
" says governments seeking to penalize Palestine should end pressure ."
)
result = summarizer(cnn_article)
self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_text_classification.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def test_small_model_pt(self):
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
outputs = text_classifier("This is great !", top_k=2)
self.assertEqual(
nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
)
outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
self.assertEqual(
nested_simplify(outputs),
[
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
],
)
outputs = text_classifier("This is great !", top_k=1)
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
# Legacy behavior
outputs = text_classifier("This is great !", return_all_scores=False)
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
outputs = text_classifier("This is great !", return_all_scores=True)
self.assertEqual(
nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
)
outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
self.assertEqual(
nested_simplify(outputs),
[
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
],
)
outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
self.assertEqual(
nested_simplify(outputs),
[
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
],
)
@require_torch
def test_accepts_torch_device(self):
text_classifier = pipeline(
task="text-classification",
model="hf-internal-testing/tiny-random-distilbert",
framework="pt",
device=torch_device,
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
def test_small_model_tf(self):
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
def test_pt_bert(self):
text_classifier = pipeline("text-classification")
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
outputs = text_classifier("This is bad !")
self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
outputs = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
def test_tf_bert(self):
text_classifier = pipeline("text-classification", framework="tf")
outputs = text_classifier("This is great !")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
outputs = text_classifier("This is bad !")
self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
outputs = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
def get_test_pipeline(self, model, tokenizer, processor):
text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
return text_classifier, ["HuggingFace is in", "This is another test"]
def run_pipeline_test(self, text_classifier, _):
model = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
valid_inputs = "HuggingFace is in"
outputs = text_classifier(valid_inputs)
self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
valid_inputs = ["HuggingFace is in ", "Paris is in France"]
outputs = text_classifier(valid_inputs)
self.assertEqual(
nested_simplify(outputs),
[{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
)
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
self.assertTrue(outputs[1]["label"] in model.config.id2label.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
outputs = text_classifier(valid_inputs, top_k=None)
N = len(model.config.id2label.values())
self.assertEqual(
nested_simplify(outputs),
[[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
)
valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
outputs = text_classifier(valid_inputs)
self.assertEqual(
nested_simplify(outputs),
{"label": ANY(str), "score": ANY(float)},
)
self.assertTrue(outputs["label"] in model.config.id2label.values())
# This might be used as a text pair, but the tokenizer + pipe interaction
# makes it hard to tell that the pair is not being used properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead, as it was producing wrong outputs.
invalid_input = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(ValueError):
text_classifier(invalid_input)
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
self.assertEqual(
nested_simplify(outputs),
[{"label": ANY(str), "score": ANY(float)}],
)
self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_token_classification.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AutoModelForTokenClassification,
AutoTokenizer,
TokenClassificationPipeline,
pipeline,
)
from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from .test_pipelines_common import ANY
VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]]
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TokenClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(self, model, tokenizer, processor):
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer)
return token_classifier, ["A simple string", "A simple string that is quite a bit longer"]
def run_pipeline_test(self, token_classifier, _):
model = token_classifier.model
tokenizer = token_classifier.tokenizer
if not tokenizer.is_fast:
return # Slow tokenizers do not return offset mappings, so this test will fail
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"])
self.assertIsInstance(outputs, list)
self.assertEqual(len(outputs), 2)
n = len(outputs[0])
m = len(outputs[1])
self.assertEqual(
nested_simplify(outputs),
[
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(m)
],
],
)
self.run_aggregation_strategy(model, tokenizer)
def run_aggregation_strategy(self, model, tokenizer):
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="average"
)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
with self.assertWarns(UserWarning):
token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)
with self.assertWarns(UserWarning):
token_classifier = pipeline(
task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True
)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
@slow
@require_torch
def test_chunking(self):
NER_MODEL = "elastic/distilbert-base-uncased-finetuned-conll03-english"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
tokenizer.model_max_length = 10
stride = 5
sentence = (
"Hugging Face, Inc. is a French company that develops tools for building applications using machine learning. "
"The company, based in New York City was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf."
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="simple", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="first", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="max", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="average", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
@require_torch
def test_chunking_fast(self):
# Note: We cannot test chunking "conflicts" here.
# The problem is that the model is random, so the results depend heavily
# on the chunking, and we cannot expect "abcd" and "bcd" to find
# the same entities. We defer to slow tests for this.
pipe = pipeline(model="hf-internal-testing/tiny-bert-for-token-classification")
sentence = "The company, based in New York City was founded in 2016 by French entrepreneurs"
results = pipe(sentence, aggregation_strategy="first")
# This is what this random model gives on the full sentence
self.assertEqual(
nested_simplify(results),
[
# This spans 2 actual tokens
{"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"},
{"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"},
],
)
# This will force the tokenizer to split after "city was".
pipe.tokenizer.model_max_length = 12
self.assertEqual(
pipe.tokenizer.decode(pipe.tokenizer.encode(sentence, truncation=True)),
"[CLS] the company, based in new york city was [SEP]",
)
stride = 4
results = pipe(sentence, aggregation_strategy="first", stride=stride)
self.assertEqual(
nested_simplify(results),
[
{"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"},
# This is an extra entity found by this random model, but at least both original
# entities are there
{"end": 58, "entity_group": "MISC", "score": 0.115, "start": 56, "word": "by"},
{"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"},
],
)
@require_torch
@slow
def test_spanish_bert(self):
# https://github.com/huggingface/transformers/pull/4987
NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses."""
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1},
{"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2},
{"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4},
{"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114},
],
)
@require_torch_accelerator
@slow
def test_accelerator(self):
sentence = "This is dummy sentence"
ner = pipeline(
"token-classification",
device=torch_device,
aggregation_strategy=AggregationStrategy.SIMPLE,
)
output = ner(sentence)
self.assertEqual(nested_simplify(output), [])
@require_torch
@slow
def test_dbmdz_english(self):
# Other sentence
NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
sentence = """Enzo works at the UN"""
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1},
{"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2},
{"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
@require_torch
@slow
def test_aggregation_strategy_byte_level_tokenizer(self):
sentence = "Groenlinks praat over Schiphol."
ner = pipeline("ner", model="xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max")
self.assertEqual(
nested_simplify(ner(sentence)),
[
{"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"},
{"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31},
],
)
@require_torch
def test_aggregation_strategy_no_b_i_prefix(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
# Just to make the score indices in this test easier to follow
token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"}
example = [
{
"scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip
"index": 1,
"is_subword": False,
"word": "En",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip
"index": 2,
"is_subword": True,
"word": "##zo",
"start": 2,
"end": 4,
},
{
"scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip
"index": 7,
"word": "UN",
"is_subword": False,
"start": 11,
"end": 13,
},
]
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
[
{"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1},
{"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
{"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
[
{"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
@require_torch
def test_aggregation_strategy(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
# Just to make the score indices in this test easier to follow
self.assertEqual(
token_classifier.model.config.id2label,
{0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
)
example = [
{
"scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip
"index": 1,
"is_subword": False,
"word": "En",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip
"index": 2,
"is_subword": True,
"word": "##zo",
"start": 2,
"end": 4,
},
{
"scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip
"index": 7,
"word": "UN",
"is_subword": False,
"start": 11,
"end": 13,
},
]
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
[
{"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1},
{"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
{"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
[
{"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
[
{"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
@require_torch
def test_aggregation_strategy_example2(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
# Just to make the score indices in this test easier to follow
self.assertEqual(
token_classifier.model.config.id2label,
{0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
)
example = [
{
# Necessary for AVERAGE
"scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0, 0]),
"is_subword": False,
"index": 1,
"word": "Ra",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]),
"is_subword": True,
"word": "##ma",
"start": 2,
"end": 4,
"index": 2,
},
{
# The 4th score will have the highest average
# The 4th score is B-PER for this model
# It does not correspond to any of the subtokens.
"scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0, 0]),
"is_subword": True,
"word": "##zotti",
"start": 11,
"end": 13,
"index": 3,
},
]
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.NONE),
[
{"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1},
{"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2},
{"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3},
],
)
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.FIRST),
[{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}],
)
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.MAX),
[{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
[{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}],
)
@require_torch
@slow
def test_aggregation_strategy_offsets_with_leading_space(self):
sentence = "We're from New York"
model_name = "brandon25/deberta-base-finetuned-ner"
ner = pipeline("ner", model=model_name, ignore_labels=[], aggregation_strategy="max")
self.assertEqual(
nested_simplify(ner(sentence)),
[
{"entity_group": "O", "score": 1.0, "word": " We're from", "start": 0, "end": 10},
{"entity_group": "LOC", "score": 1.0, "word": " New York", "start": 10, "end": 19},
],
)
@require_torch
def test_gather_pre_entities(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
sentence = "Hello there"
tokens = tokenizer(
sentence,
return_attention_mask=False,
return_tensors="pt",
truncation=True,
return_special_tokens_mask=True,
return_offsets_mapping=True,
)
offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
input_ids = tokens["input_ids"].numpy()[0]
# First element is [CLS]
scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]])
pre_entities = token_classifier.gather_pre_entities(
sentence,
input_ids,
scores,
offset_mapping,
special_tokens_mask,
aggregation_strategy=AggregationStrategy.NONE,
)
self.assertEqual(
nested_simplify(pre_entities),
[
{"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1},
{
"word": "there",
"scores": [0.8, 0.1, 0.1],
"index": 2,
"start": 6,
"end": 11,
"is_subword": False,
},
],
)
@require_torch
def test_word_heuristic_leading_space(self):
model_name = "hf-internal-testing/tiny-random-deberta-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt")
sentence = "I play the theremin"
tokens = tokenizer(
sentence,
return_attention_mask=False,
return_tensors="pt",
return_special_tokens_mask=True,
return_offsets_mapping=True,
)
offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
input_ids = tokens["input_ids"].numpy()[0]
scores = np.array([[1, 0] for _ in input_ids]) # values irrelevant for heuristic
pre_entities = token_classifier.gather_pre_entities(
sentence,
input_ids,
scores,
offset_mapping,
special_tokens_mask,
aggregation_strategy=AggregationStrategy.FIRST,
)
# ensure expected tokenization and correct is_subword values
self.assertEqual(
[(entity["word"], entity["is_subword"]) for entity in pre_entities],
[("▁I", False), ("▁play", False), ("▁the", False), ("▁there", False), ("min", True)],
)
@require_tf
def test_tf_only(self):
model_name = "hf-internal-testing/tiny-random-bert-tf-only" # This model only has a TensorFlow version
# We test that if we don't specify framework='tf', it gets detected automatically
token_classifier = pipeline(task="ner", model=model_name)
self.assertEqual(token_classifier.framework, "tf")
@require_tf
def test_small_model_tf(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
token_classifier = pipeline(task="token-classification", model=model_name, framework="tf")
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
)
@require_torch
def test_no_offset_tokenizer(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt")
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None},
],
)
@require_torch
def test_small_model_pt(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
)
token_classifier = pipeline(
task="token-classification", model=model_name, framework="pt", ignore_labels=["O", "I-MISC"]
)
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[],
)
token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
# Override the offset_mapping
outputs = token_classifier(
"This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)]
)
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2},
],
)
# Batch size does not affect outputs (attention masks are required)
sentences = ["This is a test !", "Another test this is with longer sentence"]
outputs = token_classifier(sentences)
outputs_batched = token_classifier(sentences, batch_size=2)
# Batching does not make a difference in predictions
self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs))
self.assertEqual(
nested_simplify(outputs_batched),
[
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
[],
],
)
@require_torch
def test_pt_ignore_subwords_slow_tokenizer_raises(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
with self.assertRaises(ValueError):
pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST)
with self.assertRaises(ValueError):
pipeline(
task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE
)
with self.assertRaises(ValueError):
pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX)
@slow
@require_torch
def test_simple(self):
token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True)
sentence = "Hello Sarah Jessica Parker who Jessica lives in New York"
sentence2 = "This is a simple test"
output = token_classifier(sentence)
output_ = nested_simplify(output)
self.assertEqual(
output_,
[
{
"entity_group": "PER",
"score": 0.996,
"word": "Sarah Jessica Parker",
"start": 6,
"end": 26,
},
{"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
{"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
],
)
output = token_classifier([sentence, sentence2])
output_ = nested_simplify(output)
self.assertEqual(
output_,
[
[
{"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26},
{"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
{"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
],
[],
],
)
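# Tests for the argument handler that validates sentences together with optional pre-computed offset mappings.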
class TokenClassificationArgumentHandlerTestCase(unittest.TestCase):
def setUp(self):
self.args_parser = TokenClassificationArgumentHandler()
def test_simple(self):
string = "This is a simple input"
inputs, offset_mapping = self.args_parser(string)
self.assertEqual(inputs, [string])
self.assertEqual(offset_mapping, None)
inputs, offset_mapping = self.args_parser([string, string])
self.assertEqual(inputs, [string, string])
self.assertEqual(offset_mapping, None)
inputs, offset_mapping = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)])
self.assertEqual(inputs, [string])
self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]])
inputs, offset_mapping = self.args_parser(
[string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]]
)
self.assertEqual(inputs, [string, string])
self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
def test_errors(self):
string = "This is a simple input"
# 2 sentences, 1 offset_mapping, args
with self.assertRaises(TypeError):
self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]])
# 2 sentences, 1 offset_mapping, args
with self.assertRaises(TypeError):
self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)])
# 2 sentences, 1 offset_mapping, input_list
with self.assertRaises(ValueError):
self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]])
# 2 sentences, 1 offset_mapping, input_list
with self.assertRaises(ValueError):
self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)])
# 1 sentences, 2 offset_mapping
with self.assertRaises(ValueError):
self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
# 0 sentences, 1 offset_mapping
with self.assertRaises(TypeError):
self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(self, model, tokenizer, processor):
classifier = ZeroShotClassificationPipeline(
model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
)
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def run_pipeline_test(self, classifier, _):
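# Candidate labels may be given as a list, a single string, or a comma-separated string;
# in single-label mode the scores over the candidate set should sum to 1.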
outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
# No kwarg
outputs = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
)
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
)
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
outputs = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
)
self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
# https://github.com/huggingface/transformers/issues/13846
outputs = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
for i in range(1)
],
)
outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
for i in range(2)
],
)
with self.assertRaises(ValueError):
classifier("", candidate_labels="politics")
with self.assertRaises(TypeError):
classifier(None, candidate_labels="politics")
with self.assertRaises(ValueError):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(TypeError):
classifier("Who are you voting for in 2020?", candidate_labels=None)
with self.assertRaises(ValueError):
classifier(
"Who are you voting for in 2020?",
candidate_labels="politics",
hypothesis_template="Not formatting template",
)
with self.assertRaises(AttributeError):
classifier(
"Who are you voting for in 2020?",
candidate_labels="politics",
hypothesis_template=None,
)
self.run_entailment_id(classifier)
def run_entailment_id(self, zero_shot_classifier: Pipeline):
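# Check how the entailment label index is resolved from `config.label2id`: -1 when no label
# name looks like "entail...", otherwise the id of the matching label.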
config = zero_shot_classifier.model.config
original_label2id = config.label2id
original_entailment = zero_shot_classifier.entailment_id
config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
zero_shot_classifier.model.config.label2id = original_label2id
self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def test_truncation(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
framework="pt",
)
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
)
@require_torch
def test_small_model_pt(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
framework="pt",
)
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
},
)
@require_tf
def test_small_model_tf(self):
zero_shot_classifier = pipeline(
"zero-shot-classification",
model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
framework="tf",
)
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
},
)
@slow
@require_torch
def test_large_model_pt(self):
zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
},
)
outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.",
candidate_labels=["machine learning", "statistics", "translation", "vision"],
multi_label=True,
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
},
)
@slow
@require_tf
def test_large_model_tf(self):
zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
outputs = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
},
)
outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.",
candidate_labels=["machine learning", "statistics", "translation", "vision"],
multi_label=True,
)
self.assertEqual(
nested_simplify(outputs),
{
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
},
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_visual_question_answering.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
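# The image argument can be a PIL.Image or a local path; both forms are exercised below.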
vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
examples = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def run_pipeline_test(self, vqa_pipeline, examples):
outputs = vqa_pipeline(examples, top_k=1)
self.assertEqual(
outputs,
[
[{"score": ANY(float), "answer": ANY(str)}],
[{"score": ANY(float), "answer": ANY(str)}],
],
)
@require_torch
def test_small_model_pt(self):
vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
question = "How many cats are there?"
outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
self.assertEqual(
outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
)
outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
)
@require_torch
@require_torch_accelerator
def test_small_model_pt_blip2(self):
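# BLIP-2 generates free-form answers, so the pipeline returns only an "answer" string (no score).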
vqa_pipeline = pipeline(
"visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration"
)
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
question = "How many cats are there?"
outputs = vqa_pipeline(image=image, question=question)
self.assertEqual(outputs, [{"answer": ANY(str)}])
outputs = vqa_pipeline({"image": image, "question": question})
self.assertEqual(outputs, [{"answer": ANY(str)}])
outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}])
self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2)
vqa_pipeline = pipeline(
"visual-question-answering",
model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration",
model_kwargs={"torch_dtype": torch.float16},
device=torch_device,
)
self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device)))
self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)
self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16)
outputs = vqa_pipeline(image=image, question=question)
self.assertEqual(outputs, [{"answer": ANY(str)}])
@slow
@require_torch
def test_large_model_pt(self):
vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
question = "How many cats are there?"
outputs = vqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
)
outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
)
outputs = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
)
@slow
@require_torch
@require_torch_accelerator
def test_large_model_pt_blip2(self):
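# Load BLIP-2 in float16 on the accelerator and check device/dtype placement before querying.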
vqa_pipeline = pipeline(
"visual-question-answering",
model="Salesforce/blip2-opt-2.7b",
model_kwargs={"torch_dtype": torch.float16},
device=torch_device,
)
self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device)))
self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16)
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
question = "Question: how many cats are there? Answer:"
outputs = vqa_pipeline(image=image, question=question)
self.assertEqual(outputs, [{"answer": "two"}])
outputs = vqa_pipeline({"image": image, "question": question})
self.assertEqual(outputs, [{"answer": "two"}])
outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}])
self.assertEqual(outputs, [[{"answer": "two"}]] * 2)
@require_tf
@unittest.skip("Visual question answering not implemented in TF")
def test_small_model_tf(self):
pass
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_depth_estimation.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from huggingface_hub.utils import insecure_hashlib
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
def hashimage(image: Image) -> str:
m = insecure_hashlib.md5(image.tobytes())
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def run_pipeline_test(self, depth_estimator, examples):
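# Each output should contain the raw predicted depth tensor and a rendered PIL depth image.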
outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
)
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
],
outputs,
)
@require_tf
@unittest.skip("Depth estimation is not implemented in TF")
def test_small_model_tf(self):
pass
@slow
@require_torch
def test_large_model_pt(self):
model_id = "Intel/dpt-large"
depth_estimator = pipeline("depth-estimation", model=model_id)
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
def test_small_model_pt(self):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_mask_generation.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Dict
import numpy as np
from huggingface_hub.utils import insecure_hashlib
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
def hashimage(image: Image) -> str:
m = insecure_hashlib.md5(image.tobytes())
return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
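# Summarize a mask as a short hash plus its shape so the long expected-output lists stay readable.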
npimg = np.array(mask)
shape = npimg.shape
return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
model_mapping = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
)
tf_model_mapping = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
)
def get_test_pipeline(self, model, tokenizer, processor):
image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
# TODO: Implement me @Arthur
def run_pipeline_test(self, mask_generator, examples):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF")
def test_small_model_tf(self):
pass
@slow
@require_torch
def test_small_model_pt(self):
image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["masks"]):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output, decimals=4),
[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
],
)
# fmt: on
@require_torch
@slow
def test_threshold(self):
model_id = "facebook/sam-vit-huge"
image_segmenter = pipeline("mask-generation", model=model_id)
outputs = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
)
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["masks"]):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output, decimals=4),
[
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
],
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_translation.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MBart50TokenizerFast,
MBartConfig,
MBartForConditionalGeneration,
TranslationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow
from .test_pipelines_common import ANY
@is_pipeline_test
class TranslationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
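# MBart is multilingual, so the pipeline needs explicit src_lang/tgt_lang; other models are built without them.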
if isinstance(model.config, MBartConfig):
src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2]
translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang)
else:
translator = TranslationPipeline(model=model, tokenizer=tokenizer)
return translator, ["Some string", "Some other text"]
def run_pipeline_test(self, translator, _):
outputs = translator("Some string")
self.assertEqual(outputs, [{"translation_text": ANY(str)}])
outputs = translator(["Some string"])
self.assertEqual(outputs, [{"translation_text": ANY(str)}])
outputs = translator(["Some string", "other string"])
self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}])
@require_torch
def test_small_model_pt(self):
translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt")
outputs = translator("This is a test string", max_length=20)
self.assertEqual(
outputs,
[
{
"translation_text": (
"Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide"
" Beide Beide"
)
}
],
)
@require_tf
def test_small_model_tf(self):
translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf")
outputs = translator("This is a test string", max_length=20)
self.assertEqual(
outputs,
[
{
"translation_text": (
"Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide"
" Beide Beide"
)
}
],
)
@require_torch
def test_en_to_de_pt(self):
translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt")
outputs = translator("This is a test string", max_length=20)
self.assertEqual(
outputs,
[
{
"translation_text": (
"monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine"
" urine urine urine urine urine urine urine"
)
}
],
)
@require_tf
def test_en_to_de_tf(self):
translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf")
outputs = translator("This is a test string", max_length=20)
self.assertEqual(
outputs,
[
{
"translation_text": (
"monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine"
" urine urine urine urine urine urine urine"
)
}
],
)
class TranslationNewFormatPipelineTests(unittest.TestCase):
@require_torch
@slow
def test_default_translations(self):
# We don't provide a default for this pair
with self.assertRaises(ValueError):
pipeline(task="translation_cn_to_ar")
# but we do for this one
translator = pipeline(task="translation_en_to_de")
self.assertEqual(translator._preprocess_params["src_lang"], "en")
self.assertEqual(translator._preprocess_params["tgt_lang"], "de")
@require_torch
@slow
def test_multilingual_translation(self):
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
# Missing src_lang, tgt_lang
with self.assertRaises(ValueError):
translator("This is a test")
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN")
self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}])
# src_lang, tgt_lang can be defined at pipeline call time
translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR")
outputs = translator("This is a test")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
@require_torch
def test_translation_on_odd_language(self):
model = "patrickvonplaten/t5-tiny-random"
translator = pipeline(task="translation_cn_to_ar", model=model)
self.assertEqual(translator._preprocess_params["src_lang"], "cn")
self.assertEqual(translator._preprocess_params["tgt_lang"], "ar")
@require_torch
def test_translation_default_language_selection(self):
model = "patrickvonplaten/t5-tiny-random"
with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"):
translator = pipeline(task="translation", model=model)
self.assertEqual(translator.task, "translation_en_to_de")
self.assertEqual(translator._preprocess_params["src_lang"], "en")
self.assertEqual(translator._preprocess_params["tgt_lang"], "de")
@require_torch
def test_translation_with_no_language_no_model_fails(self):
with self.assertRaises(ValueError):
pipeline(task="translation")
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_conversational.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallTokenizer,
Conversation,
ConversationalPipeline,
TFAutoModelForCausalLM,
pipeline,
)
from transformers.testing_utils import (
backend_empty_cache,
is_pipeline_test,
is_torch_available,
require_tf,
require_torch,
slow,
torch_device,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class ConversationalPipelineTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
backend_empty_cache(torch_device)
model_mapping = dict(
(list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [])
+ (list(MODEL_FOR_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_CAUSAL_LM_MAPPING else [])
)
tf_model_mapping = dict(
(list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [])
+ (list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_CAUSAL_LM_MAPPING else [])
)
def get_test_pipeline(self, model, tokenizer, processor):
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
return conversation_agent, [Conversation("Hi there!")]
def run_pipeline_test(self, conversation_agent, _):
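# Single conversations, lists, and batches should all come back as Conversation objects with an
# assistant reply appended after each user message.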
# Simple
outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5)
self.assertEqual(
outputs,
Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]),
)
# Single list
outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5)
self.assertEqual(
outputs,
Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]),
)
# Batch
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
self.assertEqual(len(conversation_1), 1)
self.assertEqual(len(conversation_2), 1)
outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5)
self.assertEqual(outputs, [conversation_1, conversation_2])
self.assertEqual(
outputs,
[
Conversation(
[
{"role": "user", "content": "Going to the movies tonight - any suggestions?"},
{"role": "assistant", "content": ANY(str)},
],
),
Conversation(
[
{"role": "user", "content": "What's the last book you have read?"},
{"role": "assistant", "content": ANY(str)},
]
),
],
)
# One conversation with history
conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"})
outputs = conversation_agent(conversation_2, max_new_tokens=5)
self.assertEqual(outputs, conversation_2)
self.assertEqual(
outputs,
Conversation(
[
{"role": "user", "content": "What's the last book you have read?"},
{"role": "assistant", "content": ANY(str)},
{"role": "user", "content": "Why do you recommend it?"},
{"role": "assistant", "content": ANY(str)},
]
),
)
@require_torch
@slow
def test_integration_torch_conversation(self):
# When
conversation_agent = pipeline(task="conversational", device=torch_device)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
# When
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], "Going to the movies tonight - any suggestions?")
self.assertEqual(result[0].generated_responses[0], "The Big Lebowski")
self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?")
self.assertEqual(result[1].generated_responses[0], "The Last Question")
# When
conversation_2.add_user_input("Why do you recommend it?")
result = conversation_agent(conversation_2, do_sample=False, max_length=1000)
# Then
self.assertEqual(result, conversation_2)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], "Why do you recommend it?")
self.assertEqual(result.generated_responses[1], "It's a good book.")
@require_torch
@slow
def test_integration_torch_conversation_truncated_history(self):
# When
conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
# When
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
# Then
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 1)
self.assertEqual(len(result.generated_responses), 1)
self.assertEqual(result.past_user_inputs[0], "Going to the movies tonight - any suggestions?")
self.assertEqual(result.generated_responses[0], "The Big Lebowski")
# When
conversation_1.add_user_input("Is it an action movie?")
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
# Then
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], "Is it an action movie?")
self.assertEqual(result.generated_responses[1], "It's a comedy.")
@require_torch
def test_small_model_pt(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation("hello")
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"]))
@require_tf
def test_small_model_tf(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation("hello")
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"]))
@require_torch
@slow
def test_integration_torch_conversation_dialogpt_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation("hello")
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs["input_ids"].tolist(), [[31373, 50256]])
conversation_2 = Conversation("how are you ?", past_user_inputs=["hello"], generated_responses=["Hi there!"])
inputs = conversation_agent.preprocess(conversation_2)
self.assertEqual(
inputs["input_ids"].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]]
)
@unittest.skip("Model is curently gated")
@require_torch
@slow
def test_integration_torch_conversation_llama2_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_default_system_prompt=True)
conversation = Conversation(
"What is so great about #1?",
past_user_inputs=["I am going to Paris, what should I see?"],
generated_responses=[
"""\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."""
],
)
inputs = tokenizer._build_conversation_input_ids(conversation)
EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] # fmt: skip
self.assertEqual(inputs, EXPECTED_INPUTS_IDS)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
EXPECTED_TEXT = "what topic you want to focus on and create content around it. This will help you stand out from other creators and attract a specific audience.\n\nStep 2: Set Up Your Channel\nCreate your YouTube account and customize your channel with your branding and logo. Make sure your channel name and profile picture are consistent with your niche.\n\nStep 3: Plan Your Content\nDevelop a content strategy that includes the type of content you want to create, how often you will post, and when you will post. Consider creating a content calendar to help you stay organized.\n\nStep 4: Invest in Quality Equipment\nInvest in good quality camera and microphone equipment to ensure your videos look and sound professional. You don't need to break the bank, but investing in good equipment will make a big difference in the quality of your videos.\n\nStep 5: Optimize Your Videos for Search\nUse keywords in your video titles, descriptions, and tags to help people find your videos when they search for topics related to your niche"
conversation = Conversation(
"<<SYS>>\n Only answer with emojis, and charades\n<</SYS>>\n\nHow can I build a house in 10 steps?"
)
result = conversation_agent(conversation)
self.assertEqual(result.generated_responses[-1], EXPECTED_TEXT)
@require_torch
@slow
def test_integration_torch_conversation_blenderbot_400M_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
# test1
conversation_1 = Conversation("hello")
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs["input_ids"].tolist(), [[1710, 86, 2]])
# test2
conversation_1 = Conversation(
"I like lasagne.",
past_user_inputs=["hello"],
generated_responses=[
" Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie."
],
)
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(
inputs["input_ids"].tolist(),
[
# This should be compared with the same conversation on ParlAI `safe_interactive` demo.
[
1710, # hello
86,
228, # Double space
228,
946,
304,
398,
6881,
558,
964,
38,
452,
315,
265,
6252,
452,
322,
968,
6884,
3146,
278,
306,
265,
617,
87,
388,
75,
341,
286,
521,
21,
228, # Double space
228,
281, # I like lasagne.
398,
6881,
558,
964,
21,
2, # EOS
],
],
)
@require_torch
@slow
def test_integration_torch_conversation_blenderbot_400M(self):
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation("hello")
result = conversation_agent(
conversation_1,
)
self.assertEqual(
result.generated_responses[0],
# ParlAI implementation output; we have a different one, but it's our
# second best; you can check by using num_return_sequences=10
# " Hello! How are you? I'm just getting ready to go to work, how about you?",
" Hello! How are you doing today? I just got back from a walk with my dog.",
)
conversation_1 = Conversation("Lasagne hello")
result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3)
self.assertEqual(
result.generated_responses[0],
" Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.",
)
conversation_1 = Conversation(
"Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne."
)
result = conversation_agent(
conversation_1,
encoder_no_repeat_ngram_size=3,
)
self.assertEqual(
result.generated_responses[0],
" Me too. I like how it can be topped with vegetables, meats, and condiments.",
)
@require_torch
@slow
def test_integration_torch_conversation_encoder_decoder(self):
# When
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device)
conversation_1 = Conversation("My name is Sarah and I live in London")
conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
# When
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], "My name is Sarah and I live in London")
self.assertEqual(
result[0].generated_responses[0],
"hi sarah, i live in london as well. do you have any plans for the weekend?",
)
self.assertEqual(
result[1].past_user_inputs[0], "Going to the movies tonight, What movie would you recommend? "
)
self.assertEqual(
result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?"
)
# When
conversation_1.add_user_input("Not yet, what about you?")
conversation_2.add_user_input("What's your name?")
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 2)
self.assertEqual(len(result[1].past_user_inputs), 2)
self.assertEqual(len(result[0].generated_responses), 2)
self.assertEqual(len(result[1].generated_responses), 2)
self.assertEqual(result[0].past_user_inputs[1], "Not yet, what about you?")
self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.")
self.assertEqual(result[1].past_user_inputs[1], "What's your name?")
self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.")
@require_torch
@slow
def test_from_pipeline_conversation(self):
model_id = "facebook/blenderbot_small-90M"
# from model id
conversation_agent_from_model_id = pipeline("conversational", model=model_id, tokenizer=model_id)
# from model object
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id)
tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id)
conversation_agent_from_model = pipeline("conversational", model=model, tokenizer=tokenizer)
conversation = Conversation("My name is Sarah and I live in London")
conversation_copy = Conversation("My name is Sarah and I live in London")
result_model_id = conversation_agent_from_model_id([conversation])
result_model = conversation_agent_from_model([conversation_copy])
# check for equality
self.assertEqual(
result_model_id.generated_responses[0],
"hi sarah, i live in london as well. do you have any plans for the weekend?",
)
self.assertEqual(
result_model_id.generated_responses[0],
result_model.generated_responses[0],
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_text_generation.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_accelerator,
require_torch_gpu,
require_torch_or_tf,
torch_device,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def test_small_model_pt(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
)
outputs = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
outputs,
[
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
],
)
outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs,
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
)
text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
text_generator.tokenizer.pad_token = "<pad>"
outputs = text_generator(
["This is a test", "This is a second test"],
do_sample=True,
num_return_sequences=2,
batch_size=2,
return_tensors=True,
)
self.assertEqual(
outputs,
[
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
],
)
@require_tf
def test_small_model_tf(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
)
outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
self.assertEqual(
outputs,
[
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
],
)
def get_test_pipeline(self, model, tokenizer, processor):
text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
return text_generator, ["This is a test", "Another test"]
def test_stop_sequence_stopping_criteria(self):
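# Passing `stop_sequence` should stop generation as soon as that string has been produced.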
prompt = """Hello I believe in"""
text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
output = text_generator(prompt)
self.assertEqual(
output,
[{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
)
output = text_generator(prompt, stop_sequence=" fe")
self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
def run_pipeline_test(self, text_generator, _):
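# `return_full_text` controls whether the prompt is echoed back as part of `generated_text`.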
model = text_generator.model
tokenizer = text_generator.tokenizer
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
outputs = text_generator("This is a test", return_full_text=False)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
outputs = text_generator("This is a test", return_full_text=True)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
if text_generator.tokenizer.pad_token is not None:
outputs = text_generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_full_text=True, return_text=True)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_full_text=True, return_tensors=True)
with self.assertRaises(ValueError):
outputs = text_generator("test", return_text=True, return_tensors=True)
        # An empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which will always append EOS and so will
        # work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
outputs = text_generator("")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
else:
with self.assertRaises((ValueError, AssertionError)):
outputs = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [
"RwkvForCausalLM",
"XGLMForCausalLM",
"GPTNeoXForCausalLM",
"FuyuForCausalLM",
]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 500, max_new_tokens=20)
outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # The "hole" strategy cannot work when more new tokens are requested than the model's maximum length allows
with self.assertRaises(ValueError):
text_generator(
"This is a test" * 500,
handle_long_generation="hole",
max_new_tokens=tokenizer.model_max_length + 10,
)
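    # --- Illustrative sketch (not part of the original test suite) ---
    # A hedged, minimal example of the `handle_long_generation="hole"` option exercised
    # above: roughly speaking, it drops the oldest prompt tokens so that the remaining
    # prompt plus `max_new_tokens` fits within the model's maximum length. The tiny
    # checkpoint below is an assumption; the underscore prefix keeps pytest from
    # collecting this helper as a test.
    def _demo_handle_long_generation(self):
        generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        long_prompt = "This is a test " * 500
        return generator(long_prompt, handle_long_generation="hole", max_new_tokens=20)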
@require_torch
@require_accelerate
@require_torch_gpu
def test_small_model_pt_bloom_accelerate(self):
import torch
# Classic `model_kwargs`
pipe = pipeline(
model="hf-internal-testing/tiny-random-bloom",
model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
)
self.assertEqual(pipe.model.device, torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
out = pipe("This is a test")
self.assertEqual(
out,
[
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
],
)
        # `device_map` and `torch_dtype` were upgraded to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else.)
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
self.assertEqual(pipe.model.device, torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
out = pipe("This is a test")
self.assertEqual(
out,
[
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
],
)
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
self.assertEqual(pipe.model.device, torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
out = pipe("This is a test")
self.assertEqual(
out,
[
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
],
)
@require_torch
@require_torch_accelerator
def test_small_model_fp16(self):
import torch
pipe = pipeline(
model="hf-internal-testing/tiny-random-bloom",
device=torch_device,
torch_dtype=torch.float16,
)
pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_accelerator
def test_pipeline_accelerate_top_p(self):
import torch
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
pipe("This is a test", do_sample=True, top_p=0.5)
def test_pipeline_length_setting_warning(self):
prompt = """Hello world"""
text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
if text_generator.model.framework == "tf":
logger = logging.get_logger("transformers.generation.tf_utils")
else:
logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(logger) as cl:
_ = text_generator(prompt, max_length=10, max_new_tokens=1)
self.assertIn(logger_msg, cl.out)
# The user only sets one -> no warning
with CaptureLogger(logger) as cl:
_ = text_generator(prompt, max_new_tokens=1)
self.assertNotIn(logger_msg, cl.out)
with CaptureLogger(logger) as cl:
_ = text_generator(prompt, max_length=10)
self.assertNotIn(logger_msg, cl.out)
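# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, self-contained example of the text-generation pipeline usage covered by the
# tests above, assuming the tiny checkpoint "hf-internal-testing/tiny-random-gpt2" can be
# downloaded. `do_sample=False` keeps the output deterministic, mirroring the assertions
# in `test_small_model_pt`.
def _demo_text_generation_pipeline():
    from transformers import pipeline
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    outputs = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
    # `outputs` is a list with one dict per prompt: [{"generated_text": "..."}]
    return outputs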
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_text_to_audio.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import (
MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING,
AutoProcessor,
TextToAudioPipeline,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_torch,
require_torch_accelerator,
require_torch_or_tf,
slow,
torch_device,
)
from transformers.trainer_utils import set_seed
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextToAudioPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
# for now only test text_to_waveform and not text_to_spectrogram
@slow
@require_torch
def test_small_musicgen_pt(self):
music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")
forward_params = {
"do_sample": False,
"max_new_tokens": 250,
}
outputs = music_generator("This is a test", forward_params=forward_params)
self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs)
# test two examples side-by-side
outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test batching
outputs = music_generator(
["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
@slow
@require_torch
def test_small_bark_pt(self):
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt")
forward_params = {
# Using `do_sample=False` to force deterministic output
"do_sample": False,
"semantic_max_new_tokens": 100,
}
outputs = speech_generator("This is a test", forward_params=forward_params)
self.assertEqual(
{"audio": ANY(np.ndarray), "sampling_rate": 24000},
outputs,
)
# test two examples side-by-side
outputs = speech_generator(
["This is a test", "This is a second test"],
forward_params=forward_params,
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test other generation strategy
forward_params = {
"do_sample": True,
"semantic_max_new_tokens": 100,
"semantic_num_return_sequences": 2,
}
outputs = speech_generator("This is a test", forward_params=forward_params)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# test using a speaker embedding
processor = AutoProcessor.from_pretrained("suno/bark-small")
temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5")
history_prompt = temp_inp["history_prompt"]
forward_params["history_prompt"] = history_prompt
outputs = speech_generator(
["This is a test", "This is a second test"],
forward_params=forward_params,
batch_size=2,
)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
@slow
@require_torch_accelerator
def test_conversion_additional_tensor(self):
speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device)
processor = AutoProcessor.from_pretrained("suno/bark-small")
forward_params = {
"do_sample": True,
"semantic_max_new_tokens": 100,
}
        # At the moment, this must be done to stay consistent with BarkProcessor
preprocess_params = {
"max_length": 256,
"add_special_tokens": False,
"return_attention_mask": True,
"return_token_type_ids": False,
"padding": "max_length",
}
outputs = speech_generator(
"This is a test",
forward_params=forward_params,
preprocess_params=preprocess_params,
)
temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5")
history_prompt = temp_inp["history_prompt"]
forward_params["history_prompt"] = history_prompt
# history_prompt is a torch.Tensor passed as a forward_param
# if generation is successful, it means that it was passed to the right device
outputs = speech_generator(
"This is a test", forward_params=forward_params, preprocess_params=preprocess_params
)
self.assertEqual(
{"audio": ANY(np.ndarray), "sampling_rate": 24000},
outputs,
)
@slow
@require_torch
def test_vits_model_pt(self):
speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt")
outputs = speech_generator("This is a test")
self.assertEqual(outputs["sampling_rate"], 16000)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# test two examples side-by-side
outputs = speech_generator(["This is a test", "This is a second test"])
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
# test batching
outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2)
self.assertEqual(ANY(np.ndarray), outputs[0]["audio"])
@slow
@require_torch
def test_forward_model_kwargs(self):
# use vits - a forward model
speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt")
# for reproducibility
set_seed(555)
outputs = speech_generator("This is a test", forward_params={"speaker_id": 5})
audio = outputs["audio"]
with self.assertRaises(TypeError):
            # assert an error is raised if a generate-only parameter is passed to a forward-only model
outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True})
forward_params = {"speaker_id": 5}
generate_kwargs = {"do_sample": True}
with self.assertRaises(ValueError):
            # assert an error is raised if generate_kwargs are used with forward-only models
outputs = speech_generator(
"This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs
)
self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5)
@slow
@require_torch
def test_generative_model_kwargs(self):
# use musicgen - a generative model
music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")
forward_params = {
"do_sample": True,
"max_new_tokens": 250,
}
# for reproducibility
set_seed(555)
outputs = music_generator("This is a test", forward_params=forward_params)
audio = outputs["audio"]
self.assertEqual(ANY(np.ndarray), audio)
# make sure generate kwargs get priority over forward params
forward_params = {
"do_sample": False,
"max_new_tokens": 250,
}
generate_kwargs = {"do_sample": True}
# for reproducibility
set_seed(555)
outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs)
self.assertListEqual(outputs["audio"].tolist(), audio.tolist())
def get_test_pipeline(self, model, tokenizer, processor):
speech_generator = TextToAudioPipeline(model=model, tokenizer=tokenizer)
return speech_generator, ["This is a test", "Another test"]
def run_pipeline_test(self, speech_generator, _):
outputs = speech_generator("This is a test")
self.assertEqual(ANY(np.ndarray), outputs["audio"])
forward_params = (
{"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {}
)
outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params)
audio = [output["audio"] for output in outputs]
self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
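# --- Illustrative sketch (not part of the original test suite) ---
# A minimal example of the text-to-audio pipeline pattern tested above, assuming the
# "facebook/mms-tts-eng" checkpoint is available. The pipeline returns a dict with a
# NumPy waveform and its sampling rate.
def _demo_text_to_audio_pipeline():
    tts = pipeline(task="text-to-audio", model="facebook/mms-tts-eng")
    out = tts("This is a test")
    # e.g. {"audio": np.ndarray, "sampling_rate": 16000}
    return out["audio"], out["sampling_rate"]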
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_feature_extraction.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import (
FEATURE_EXTRACTOR_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_MAPPING,
TF_MODEL_MAPPING,
FeatureExtractionPipeline,
LxmertConfig,
is_tf_available,
is_torch_available,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@is_pipeline_test
class FeatureExtractionPipelineTests(unittest.TestCase):
model_mapping = MODEL_MAPPING
tf_model_mapping = TF_MODEL_MAPPING
@require_torch
def test_small_model_pt(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
@require_tf
def test_small_model_tf(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
@require_torch
def test_tokenization_small_model_pt(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
# test with empty parameters
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
# test with various tokenizer parameters
tokenize_kwargs = {"max_length": 3}
outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs)
self.assertEqual(np.squeeze(outputs).shape, (3, 32))
tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
tokenize_kwargs = {"padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
        # raise a ValueError if the truncation parameter is given in two places
tokenize_kwargs = {"truncation": True}
with self.assertRaises(ValueError):
_ = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
@require_tf
def test_tokenization_small_model_tf(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
# test with empty parameters
outputs = feature_extractor("This is a test")
self.assertEqual(
nested_simplify(outputs),
[[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip
# test with various tokenizer parameters
tokenize_kwargs = {"max_length": 3}
outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs)
self.assertEqual(np.squeeze(outputs).shape, (3, 32))
tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
tokenize_kwargs = {"padding": True, "max_length": 4}
outputs = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
        # raise a ValueError if the truncation parameter is given in two places
tokenize_kwargs = {"truncation": True}
with self.assertRaises(ValueError):
_ = feature_extractor(
["This is a test", "This", "This is", "This is a", "This is a test test test test"],
truncation=True,
tokenize_kwargs=tokenize_kwargs,
)
@require_torch
def test_return_tensors_pt(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
outputs = feature_extractor("This is a test", return_tensors=True)
self.assertTrue(torch.is_tensor(outputs))
@require_tf
def test_return_tensors_tf(self):
feature_extractor = pipeline(
task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
)
outputs = feature_extractor("This is a test", return_tensors=True)
self.assertTrue(tf.is_tensor(outputs))
def get_shape(self, input_, shape=None):
if shape is None:
shape = []
if isinstance(input_, list):
subshapes = [self.get_shape(in_, shape) for in_ in input_]
if all(s == 0 for s in subshapes):
shape.append(len(input_))
else:
subshape = subshapes[0]
shape = [len(input_), *subshape]
elif isinstance(input_, float):
return 0
else:
raise ValueError("We expect lists of floats, nothing else")
return shape
def get_test_pipeline(self, model, tokenizer, processor):
if tokenizer is None:
self.skipTest("No tokenizer")
return
elif (
type(model.config) in FEATURE_EXTRACTOR_MAPPING
or isinstance(model.config, LxmertConfig)
or type(model.config) in IMAGE_PROCESSOR_MAPPING
):
self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
return
elif model.config.is_encoder_decoder:
self.skipTest(
"""encoder_decoder models are trickier for this pipeline.
                Do we want encoder + decoder inputs to get some features?
                Do we want encoder-only features?
For now ignore those.
"""
)
return
feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor)
return feature_extractor, ["This is a test", "This is another test"]
def run_pipeline_test(self, feature_extractor, examples):
outputs = feature_extractor("This is a test")
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 1)
        # If we send too small an input
# there's a bug within FunnelModel (output with shape [1, 4, 2, 1] doesn't match the broadcast shape [1, 4, 2, 2])
outputs = feature_extractor(["This is a test", "Another longer test"])
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 2)
outputs = feature_extractor("This is a test" * 100, truncation=True)
shape = self.get_shape(outputs)
self.assertEqual(shape[0], 1)
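# --- Illustrative sketch (not part of the original test suite) ---
# A minimal example of feature extraction with tokenizer kwargs, mirroring the
# truncation/padding behaviour asserted above; the tiny checkpoint
# "hf-internal-testing/tiny-random-distilbert" is the same one used by the tests.
def _demo_feature_extraction_pipeline():
    extractor = pipeline(task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert")
    outputs = extractor(
        ["This is a test", "This is another test"],
        tokenize_kwargs={"truncation": True, "padding": True, "max_length": 4},
    )
    # one [num_tokens, hidden_size] matrix per input sentence
    return np.squeeze(outputs).shape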
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_document_question_answering.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
def load_image(_):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def get_test_pipeline(self, model, tokenizer, processor):
dqa_pipeline = pipeline(
"document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
)
image = INVOICE_URL
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
question = "What is the placebo?"
examples = [
{
"image": load_image(image),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def run_pipeline_test(self, dqa_pipeline, examples):
outputs = dqa_pipeline(examples, top_k=2)
self.assertEqual(
outputs,
[
[
{"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
{"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
]
]
* 3,
)
@require_torch
@require_detectron2
@require_pytesseract
def test_small_model_pt(self):
dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
image = INVOICE_URL
question = "How many cats are there?"
expected_output = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # No text is detected in this image, meaning layoutlmv2 should fail.
        # It will probably return an empty answer
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(outputs, [])
        # We can optionally pass the words and bounding boxes directly
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
words = []
boxes = []
outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
self.assertEqual(outputs, [])
# TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented
# @require_torch
# def test_small_model_pt_donut(self):
# dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut")
# # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut")
# image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png"
# question = "How many cats are there?"
#
# outputs = dqa_pipeline(image=image, question=question, top_k=2)
# self.assertEqual(
# nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
# )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def test_large_model_pt(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
revision="9977165",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2,
)
@slow
@require_torch
@require_detectron2
@require_pytesseract
def test_large_model_pt_chunk(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
revision="9977165",
max_seq_len=50,
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2,
)
@slow
@require_torch
@require_pytesseract
@require_vision
def test_large_model_pt_layoutlm(self):
tokenizer = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
)
dqa_pipeline = pipeline(
"document-question-answering",
model="impira/layoutlm-document-qa",
tokenizer=tokenizer,
revision="3dc6de3",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
],
)
outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2,
)
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
],
)
@slow
@require_torch
@require_pytesseract
@require_vision
def test_large_model_pt_layoutlm_chunk(self):
tokenizer = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
)
dqa_pipeline = pipeline(
"document-question-answering",
model="impira/layoutlm-document-qa",
tokenizer=tokenizer,
revision="3dc6de3",
max_seq_len=50,
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
],
)
outputs = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2,
)
word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
],
)
@slow
@require_torch
def test_large_model_pt_donut(self):
dqa_pipeline = pipeline(
"document-question-answering",
model="naver-clova-ix/donut-base-finetuned-docvqa",
tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
)
image = INVOICE_URL
question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF")
def test_small_model_tf(self):
pass
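# --- Illustrative sketch (not part of the original test suite) ---
# A minimal example of the document-question-answering call pattern exercised above,
# assuming the "impira/layoutlm-document-qa" checkpoint and pytesseract are installed.
# `INVOICE_URL` is the pinned invoice image defined at the top of this file.
def _demo_document_qa_pipeline():
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    answers = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
    # each answer is a dict: {"score": float, "answer": str, "start": int, "end": int}
    return answers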
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_text2text_generation.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
return generator, ["Something to write", "Something else"]
def run_pipeline_test(self, generator, _):
outputs = generator("Something there")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
outputs = generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
with self.assertRaises(ValueError):
generator(4)
@require_torch
def test_small_model_pt(self):
generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
# do_sample=False necessary for reproducibility
outputs = generator("Something there", do_sample=False)
self.assertEqual(outputs, [{"generated_text": ""}])
num_return_sequences = 3
outputs = generator(
"Something there",
num_return_sequences=num_return_sequences,
num_beams=num_return_sequences,
)
target_outputs = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(outputs, target_outputs)
outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs,
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
)
generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
generator.tokenizer.pad_token = "<pad>"
outputs = generator(
["This is a test", "This is a second test"],
do_sample=True,
num_return_sequences=2,
batch_size=2,
return_tensors=True,
)
self.assertEqual(
outputs,
[
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
],
)
@require_tf
def test_small_model_tf(self):
generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
# do_sample=False necessary for reproducibility
outputs = generator("Something there", do_sample=False)
self.assertEqual(outputs, [{"generated_text": ""}])
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
from datasets import Audio, load_dataset
from huggingface_hub import hf_hub_download, snapshot_download
from transformers import (
MODEL_FOR_CTC_MAPPING,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
Speech2TextForConditionalGeneration,
Wav2Vec2ForCTC,
WhisperForConditionalGeneration,
)
from transformers.pipelines import AutomaticSpeechRecognitionPipeline, pipeline
from transformers.pipelines.audio_utils import chunk_bytes_iter
from transformers.pipelines.automatic_speech_recognition import _find_timestamp_sequence, chunk_iter
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_pyctcdecode,
require_tf,
require_torch,
require_torch_accelerator,
require_torchaudio,
slow,
torch_device,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
# We can't use this mixin because it assumes TF support.
# from .test_pipelines_common import CustomInputPipelineCommonMixin
@is_pipeline_test
class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
model_mapping = dict(
(list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else [])
+ (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else [])
)
def get_test_pipeline(self, model, tokenizer, processor):
if tokenizer is None:
            # Side effect of these models having no Fast Tokenizer class, so skipping.
            # But the slow tokenizer tests should still run as they're quite small.
self.skipTest("No tokenizer available")
return
# return None, None
speech_recognizer = AutomaticSpeechRecognitionPipeline(
model=model, tokenizer=tokenizer, feature_extractor=processor
)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return speech_recognizer, [audio, audio2]
def run_pipeline_test(self, speech_recognizer, examples):
audio = np.zeros((34000,))
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
# Striding
audio = {"raw": audio, "stride": (0, 4000), "sampling_rate": speech_recognizer.feature_extractor.sampling_rate}
if speech_recognizer.type == "ctc":
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
elif "Whisper" in speech_recognizer.model.__class__.__name__:
outputs = speech_recognizer(audio)
self.assertEqual(outputs, {"text": ANY(str)})
else:
# Non CTC models cannot use striding.
with self.assertRaises(ValueError):
outputs = speech_recognizer(audio)
# Timestamps
audio = np.zeros((34000,))
if speech_recognizer.type == "ctc":
outputs = speech_recognizer(audio, return_timestamps="char")
self.assertIsInstance(outputs["chunks"], list)
n = len(outputs["chunks"])
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)],
},
)
outputs = speech_recognizer(audio, return_timestamps="word")
self.assertIsInstance(outputs["chunks"], list)
n = len(outputs["chunks"])
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)],
},
)
elif "Whisper" in speech_recognizer.model.__class__.__name__:
outputs = speech_recognizer(audio, return_timestamps=True)
self.assertIsInstance(outputs["chunks"], list)
nb_chunks = len(outputs["chunks"])
self.assertGreater(nb_chunks, 0)
self.assertEqual(
outputs,
{
"text": ANY(str),
"chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(nb_chunks)],
},
)
else:
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
outputs = speech_recognizer(audio, return_timestamps="char")
@require_torch
@slow
def test_pt_defaults(self):
pipeline("automatic-speech-recognition", framework="pt")
@require_torch
def test_small_model_pt(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-small-mustc-en-fr-st",
tokenizer="facebook/s2t-small-mustc-en-fr-st",
framework="pt",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "(Applaudissements)"})
output = speech_recognizer(waveform, chunk_length_s=10)
self.assertEqual(output, {"text": "(Applaudissements)"})
# Non CTC models cannot use return_timestamps
with self.assertRaisesRegex(
ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$"
):
_ = speech_recognizer(waveform, return_timestamps="char")
@slow
@require_torch_accelerator
def test_whisper_fp16(self):
speech_recognizer = pipeline(
model="openai/whisper-base",
device=torch_device,
torch_dtype=torch.float16,
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
speech_recognizer(waveform)
@require_torch
def test_small_model_pt_seq2seq(self):
speech_recognizer = pipeline(
model="hf-internal-testing/tiny-random-speech-encoder-decoder",
framework="pt",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"})
@require_torch
def test_small_model_pt_seq2seq_gen_kwargs(self):
speech_recognizer = pipeline(
model="hf-internal-testing/tiny-random-speech-encoder-decoder",
framework="pt",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform, max_new_tokens=10, generate_kwargs={"num_beams": 2})
self.assertEqual(output, {"text": "あл † γ ت ב オ 束 泣 足"})
@slow
@require_torch
@require_pyctcdecode
def test_large_model_pt_with_lm(self):
dataset = load_dataset("Narsil/asr_dummy", streaming=True)
third_item = next(iter(dataset["test"].skip(3)))
filename = third_item["file"]
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm",
framework="pt",
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
output = speech_recognizer(filename)
self.assertEqual(
output,
{"text": "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumaje"},
)
# Override back to pure CTC
speech_recognizer.type = "ctc"
output = speech_recognizer(filename)
# plumajre != plumaje
self.assertEqual(
output,
{
"text": (
"y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajre"
)
},
)
speech_recognizer.type = "ctc_with_lm"
# Simple test with CTC with LM, chunking + timestamps
output = speech_recognizer(filename, chunk_length_s=2.0, return_timestamps="word")
self.assertEqual(
output,
{
"text": (
"y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajcri"
),
"chunks": [
{"text": "y", "timestamp": (0.52, 0.54)},
{"text": "en", "timestamp": (0.6, 0.68)},
{"text": "las", "timestamp": (0.74, 0.84)},
{"text": "ramas", "timestamp": (0.94, 1.24)},
{"text": "medio", "timestamp": (1.32, 1.52)},
{"text": "sumergidas", "timestamp": (1.56, 2.22)},
{"text": "revoloteaban", "timestamp": (2.36, 3.0)},
{"text": "algunos", "timestamp": (3.06, 3.38)},
{"text": "pájaros", "timestamp": (3.46, 3.86)},
{"text": "de", "timestamp": (3.92, 4.0)},
{"text": "quimérico", "timestamp": (4.08, 4.6)},
{"text": "y", "timestamp": (4.66, 4.68)},
{"text": "legendario", "timestamp": (4.74, 5.26)},
{"text": "plumajcri", "timestamp": (5.34, 5.74)},
],
},
)
# CTC + LM models cannot use return_timestamps="char"
with self.assertRaisesRegex(
ValueError, "^CTC with LM can only predict word level timestamps, set `return_timestamps='word'`$"
):
_ = speech_recognizer(filename, return_timestamps="char")
@require_tf
def test_small_model_tf(self):
self.skipTest("Tensorflow not supported yet.")
@require_torch
def test_torch_small_no_tokenizer_files(self):
        # test that a model without tokenizer files cannot be loaded
with pytest.raises(OSError):
pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/tiny-wav2vec2-no-tokenizer",
framework="pt",
)
@require_torch
@slow
def test_torch_large(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-base-960h",
tokenizer="facebook/wav2vec2-base-960h",
framework="pt",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = speech_recognizer(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
    @slow
    @require_torch
def test_return_timestamps_in_preprocess(self):
pipe = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
chunk_length_s=8,
stride_length_s=1,
)
data = load_dataset("librispeech_asr", "clean", split="test", streaming=True)
sample = next(iter(data))
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language="en", task="transcribe")
res = pipe(sample["audio"]["array"])
self.assertEqual(res, {"text": " Conquered returned to its place amidst the tents."})
res = pipe(sample["audio"]["array"], return_timestamps=True)
self.assertEqual(
res,
{
"text": " Conquered returned to its place amidst the tents.",
"chunks": [{"timestamp": (0.0, 3.36), "text": " Conquered returned to its place amidst the tents."}],
},
)
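        # Word-level timestamps for Whisper are derived from cross-attention alignment heads, so the
        # test sets them on the generation config explicitly before asking for "word" timestamps.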
pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]]
res = pipe(sample["audio"]["array"], return_timestamps="word")
# fmt: off
self.assertEqual(
res,
{
'text': ' Conquered returned to its place amidst the tents.',
'chunks': [
{'text': ' Conquered', 'timestamp': (0.5, 1.2)},
{'text': ' returned', 'timestamp': (1.2, 1.64)},
{'text': ' to', 'timestamp': (1.64, 1.84)},
{'text': ' its', 'timestamp': (1.84, 2.02)},
{'text': ' place', 'timestamp': (2.02, 2.28)},
{'text': ' amidst', 'timestamp': (2.28, 2.8)},
{'text': ' the', 'timestamp': (2.8, 2.98)},
{'text': ' tents.', 'timestamp': (2.98, 3.48)},
],
},
)
# fmt: on
@require_torch
def test_return_timestamps_in_init(self):
# segment-level timestamps are accepted
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
tokenizer = AutoTokenizer.from_pretrained("openai/whisper-tiny")
feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny")
dummy_speech = np.ones(100)
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps=True,
)
_ = pipe(dummy_speech)
# word-level timestamps are accepted
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps="word",
)
_ = pipe(dummy_speech)
# char-level timestamps are not accepted
with self.assertRaisesRegex(
ValueError,
"^Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
"Use `return_timestamps='word'` or `return_timestamps=True` respectively.$",
):
pipe = pipeline(
task="automatic-speech-recognition",
model=model,
feature_extractor=feature_extractor,
tokenizer=tokenizer,
chunk_length_s=8,
stride_length_s=1,
return_timestamps="char",
)
_ = pipe(dummy_speech)
@require_torch
@slow
def test_torch_whisper(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."})
output = speech_recognizer([filename], chunk_length_s=5, batch_size=4)
self.assertEqual(output, [{"text": " A man said to the universe, Sir, I exist."}])
@slow
def test_find_longest_common_subsequence(self):
max_source_positions = 1500
processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]]
self.assertEqual(
processor.decode(previous_sequence[0], output_offsets=True),
{
"text": " not worth thinking about.",
"offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}],
},
)
# Merge when the previous sequence is a suffix of the next sequence
# fmt: off
next_sequences_1 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
self.assertEqual(
processor.decode(next_sequences_1[0], output_offsets=True),
{
"text": (
" of spectators, retrievality is not worth thinking about. His instant panic was followed by a"
" small, sharp blow high on his chest.<|endoftext|>"
),
"offsets": [
{"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (5.0, 9.4),
},
],
},
)
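        # Each entry passed to `_find_timestamp_sequence` pairs a batch of token sequences with a
        # stride tuple, here read as (chunk_length, left_stride, right_stride) in samples:
        # 480_000 samples is a full 30 s window at 16 kHz, and 120_000 samples overlap the previous chunk.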
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 27.5)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (27.5, 31.900000000000002),
},
],
},
)
# Merge when the sequence is in the middle of the 1st next sequence
# fmt: off
next_sequences_2 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
        # {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.', 'timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on"
" his chest."
),
"timestamp": (22.56, 31.900000000000002),
},
],
},
)
# Merge when the previous sequence is not included in the current sequence
next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip
        # {'text': ' His instant panic was followed by a small, sharp blow high on his chest.', 'timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832],
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 29.36),
},
],
},
)
        # Last case: the sequence does not fall within the first predicted start and end timestamps of the next sequence
next_sequences_3 = [
[50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934]
] # fmt: skip
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912]
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 30.96),
},
],
},
)
@slow
@require_torch
def test_whisper_timestamp_prediction(self):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
array = np.concatenate(
[ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]]
)
pipe = pipeline(
model="openai/whisper-small",
return_timestamps=True,
)
output = pipe(ds[40]["audio"])
self.assertDictEqual(
output,
{
"text": " A man said to the universe, Sir, I exist.",
"chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.26)}],
},
)
output = pipe(array, chunk_length_s=10)
self.assertDictEqual(
nested_simplify(output),
{
"chunks": [
{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)},
{
"text": (
" Sweat covered Brion's body, trickling into the "
"tight-loan cloth that was the only garment he wore, the "
"cut"
),
"timestamp": (5.5, 11.95),
},
{
"text": (
" on his chest still dripping blood, the ache of his "
"overstrained eyes, even the soaring arena around him "
"with"
),
"timestamp": (11.95, 19.61),
},
{
"text": " the thousands of spectators, retrievality is not worth thinking about.",
"timestamp": (19.61, 25.0),
},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (25.0, 29.4),
},
],
"text": (
" A man said to the universe, Sir, I exist. Sweat covered Brion's "
"body, trickling into the tight-loan cloth that was the only garment "
"he wore, the cut on his chest still dripping blood, the ache of his "
"overstrained eyes, even the soaring arena around him with the "
"thousands of spectators, retrievality is not worth thinking about. "
"His instant panic was followed by a small, sharp blow high on his "
"chest."
),
},
)
output = pipe(array)
self.assertDictEqual(
output,
{
"chunks": [
{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)},
{
"text": (
" Sweat covered Brion's body, trickling into the "
"tight-loan cloth that was the only garment"
),
"timestamp": (5.5, 10.18),
},
{"text": " he wore.", "timestamp": (10.18, 11.68)},
{"text": " The cut on his chest still dripping blood.", "timestamp": (11.68, 14.92)},
{"text": " The ache of his overstrained eyes.", "timestamp": (14.92, 17.6)},
{
"text": (
" Even the soaring arena around him with the thousands of spectators were trivialities"
),
"timestamp": (17.6, 22.56),
},
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
],
"text": (
" A man said to the universe, Sir, I exist. Sweat covered Brion's "
"body, trickling into the tight-loan cloth that was the only garment "
"he wore. The cut on his chest still dripping blood. The ache of his "
"overstrained eyes. Even the soaring arena around him with the "
"thousands of spectators were trivialities not worth thinking about."
),
},
)
@require_torch
@slow
def test_torch_speech_encoder_decoder(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/s2t-wav2vec2-large-en-de",
feature_extractor="facebook/s2t-wav2vec2-large-en-de",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": 'Ein Mann sagte zum Universum : " Sir, ich existiert! "'})
@slow
@require_torch
def test_simple_wav2vec2(self):
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = asr(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = asr(filename)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
filename = ds[40]["file"]
with open(filename, "rb") as f:
data = f.read()
output = asr(data)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
@slow
@require_torch
@require_torchaudio
def test_simple_s2t(self):
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st")
tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st")
asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
waveform = np.tile(np.arange(1000, dtype=np.float32), 34)
output = asr(waveform)
self.assertEqual(output, {"text": "(Applausi)"})
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = asr(filename)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
filename = ds[40]["file"]
with open(filename, "rb") as f:
data = f.read()
output = asr(data)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
@slow
@require_torch
@require_torchaudio
def test_simple_whisper_asr(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny.en",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
filename = ds[0]["file"]
output = speech_recognizer(filename)
self.assertEqual(
output,
{"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."},
)
output = speech_recognizer(filename, return_timestamps=True)
self.assertEqual(
output,
{
"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.",
"chunks": [
{
"text": (
" Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."
),
"timestamp": (0.0, 5.44),
}
],
},
)
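        # Word-level timestamps need Whisper's cross-attention alignment heads, set explicitly here.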
speech_recognizer.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]]
output = speech_recognizer(filename, return_timestamps="word")
# fmt: off
self.assertEqual(
output,
{
'text': ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.',
'chunks': [
{'text': ' Mr.', 'timestamp': (0.38, 1.04)},
{'text': ' Quilter', 'timestamp': (1.04, 1.18)},
{'text': ' is', 'timestamp': (1.18, 1.44)},
{'text': ' the', 'timestamp': (1.44, 1.58)},
{'text': ' apostle', 'timestamp': (1.58, 1.98)},
{'text': ' of', 'timestamp': (1.98, 2.32)},
{'text': ' the', 'timestamp': (2.32, 2.46)},
{'text': ' middle', 'timestamp': (2.46, 2.56)},
{'text': ' classes,', 'timestamp': (2.56, 3.4)},
{'text': ' and', 'timestamp': (3.4, 3.54)},
{'text': ' we', 'timestamp': (3.54, 3.62)},
{'text': ' are', 'timestamp': (3.62, 3.72)},
{'text': ' glad', 'timestamp': (3.72, 4.0)},
{'text': ' to', 'timestamp': (4.0, 4.26)},
{'text': ' welcome', 'timestamp': (4.26, 4.56)},
{'text': ' his', 'timestamp': (4.56, 4.92)},
{'text': ' gospel.', 'timestamp': (4.92, 5.84)}
]
}
)
# fmt: on
# Whisper can only predict segment level timestamps or word level, not character level
with self.assertRaisesRegex(
ValueError,
"^Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
"Use `return_timestamps='word'` or `return_timestamps=True` respectively.$",
):
_ = speech_recognizer(filename, return_timestamps="char")
@slow
@require_torch
@require_torchaudio
def test_simple_whisper_translation(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-large",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."})
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large")
feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large")
speech_recognizer_2 = AutomaticSpeechRecognitionPipeline(
model=model, tokenizer=tokenizer, feature_extractor=feature_extractor
)
output_2 = speech_recognizer_2(filename)
self.assertEqual(output, output_2)
# either use generate_kwargs or set the model's generation_config
# model.generation_config.task = "transcribe"
        # model.generation_config.language = "<|it|>"
speech_translator = AutomaticSpeechRecognitionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
generate_kwargs={"task": "transcribe", "language": "<|it|>"},
)
output_3 = speech_translator(filename)
self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."})
@slow
@require_torch
def test_whisper_language(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny.en",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
filename = ds[0]["file"]
# 1. English-only model compatible with no language argument
output = speech_recognizer(filename)
self.assertEqual(
output,
{"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."},
)
# 2. English-only Whisper does not accept the language argument
with self.assertRaisesRegex(
ValueError,
"Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, "
"pass `is_multilingual=True` to generate, or update the generation config.",
):
_ = speech_recognizer(filename, generate_kwargs={"language": "en"})
# 3. Multilingual model accepts language argument
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="openai/whisper-tiny",
framework="pt",
)
output = speech_recognizer(filename, generate_kwargs={"language": "en"})
self.assertEqual(
output,
{"text": " Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel."},
)
@slow
@require_torch
@require_torchaudio
def test_xls_r_to_en(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-xls-r-1b-21-to-en",
feature_extractor="facebook/wav2vec2-xls-r-1b-21-to-en",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": "A man said to the universe: “Sir, I exist."})
@slow
@require_torch
@require_torchaudio
def test_xls_r_from_en(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-xls-r-1b-en-to-15",
feature_extractor="facebook/wav2vec2-xls-r-1b-en-to-15",
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": "Ein Mann sagte zu dem Universum, Sir, ich bin da."})
@slow
@require_torch
@require_torchaudio
def test_speech_to_text_leveraged(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-2-bart-base",
feature_extractor="patrickvonplaten/wav2vec2-2-bart-base",
tokenizer=AutoTokenizer.from_pretrained("patrickvonplaten/wav2vec2-2-bart-base"),
framework="pt",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
filename = ds[40]["file"]
output = speech_recognizer(filename)
self.assertEqual(output, {"text": "a man said to the universe sir i exist"})
@slow
@require_torch_accelerator
def test_wav2vec2_conformer_float16(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="facebook/wav2vec2-conformer-rope-large-960h-ft",
device=torch_device,
torch_dtype=torch.float16,
framework="pt",
)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]
output = speech_recognizer(sample)
self.assertEqual(
output,
{"text": "MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"},
)
@require_torch
def test_chunking_fast(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "ZBT ZC")
@require_torch
def test_return_timestamps_ctc_fast(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
# Take short audio to keep the test readable
audio = ds[40]["audio"]["array"][:800]
output = speech_recognizer(audio, return_timestamps="char")
self.assertEqual(
output,
{
"text": "ZBT ZX G",
"chunks": [
{"text": " ", "timestamp": (0.0, 0.012)},
{"text": "Z", "timestamp": (0.012, 0.016)},
{"text": "B", "timestamp": (0.016, 0.02)},
{"text": "T", "timestamp": (0.02, 0.024)},
{"text": " ", "timestamp": (0.024, 0.028)},
{"text": "Z", "timestamp": (0.028, 0.032)},
{"text": "X", "timestamp": (0.032, 0.036)},
{"text": " ", "timestamp": (0.036, 0.04)},
{"text": "G", "timestamp": (0.04, 0.044)},
],
},
)
output = speech_recognizer(audio, return_timestamps="word")
self.assertEqual(
output,
{
"text": "ZBT ZX G",
"chunks": [
{"text": "ZBT", "timestamp": (0.012, 0.024)},
{"text": "ZX", "timestamp": (0.028, 0.036)},
{"text": "G", "timestamp": (0.04, 0.044)},
],
},
)
@require_torch
@require_pyctcdecode
def test_chunking_fast_with_lm(self):
speech_recognizer = pipeline(
model="hf-internal-testing/processor_with_lm",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
        # batch_size = 1
output1 = speech_recognizer([audio_tiled], batch_size=1)
self.assertEqual(output1, [{"text": ANY(str)}])
self.assertEqual(output1[0]["text"][:6], "<s> <s")
# batch_size = 2
output2 = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output2, [{"text": ANY(str)}])
self.assertEqual(output2[0]["text"][:6], "<s> <s")
        # TODO There is an off-by-one error because of the ratio.
        # More likely, the logits of this random model are affected by the padding.
        # Add some masking?
# self.assertEqual(output1, output2)
@require_torch
@require_pyctcdecode
def test_with_lm_fast(self):
speech_recognizer = pipeline(
model="hf-internal-testing/processor_with_lm",
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "<s> <s")
        # Make sure the arguments are passed to the decoder.
        # Since the result would not change either way, check that the error comes from
        # the `decode_beams` function instead.
with self.assertRaises(TypeError) as e:
output = speech_recognizer([audio_tiled], decoder_kwargs={"num_beams": 2})
self.assertContains(e.msg, "TypeError: decode_beams() got an unexpected keyword argument 'num_beams'")
output = speech_recognizer([audio_tiled], decoder_kwargs={"beam_width": 2})
@require_torch
@require_pyctcdecode
def test_with_local_lm_fast(self):
local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model=local_dir,
)
self.assertEqual(speech_recognizer.type, "ctc_with_lm")
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 2
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ANY(str)}])
self.assertEqual(output[0]["text"][:6], "<s> <s")
@require_torch
@slow
def test_whisper_longform(self):
# fmt: off
EXPECTED_RESULT = """ Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out of fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct denny's, set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!"""
# fmt: on
processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
model = model.to("cuda")
pipe = pipeline(
"automatic-speech-recognition",
model=model,
tokenizer=processor.tokenizer,
feature_extractor=processor.feature_extractor,
max_new_tokens=128,
device="cuda:0",
)
ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
ds = ds.cast_column("audio", Audio(sampling_rate=16000))
audio = ds[:1]["audio"]
result = pipe(audio)[0]["text"]
assert result == EXPECTED_RESULT
@require_torch
@slow
def test_seamless_v2(self):
pipe = pipeline(
"automatic-speech-recognition",
model="facebook/seamless-m4t-v2-large",
device="cuda:0",
)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]
result = pipe(sample, generate_kwargs={"tgt_lang": "eng"})
EXPECTED_RESULT = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel"
assert result["text"] == EXPECTED_RESULT
@require_torch
@slow
def test_chunking_and_timestamps(self):
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
framework="pt",
chunk_length_s=10.0,
)
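        # chunk_length_s=10.0 makes the pipeline split long inputs into ~10 s windows that are decoded
        # independently and stitched back together (by default with a stride of roughly
        # chunk_length_s / 6 on each side), so the repeated audio below is handled in chunks.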
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 10
audio_tiled = np.tile(audio, n_repeats)
output = speech_recognizer([audio_tiled], batch_size=2)
self.assertEqual(output, [{"text": ("A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats).strip()}])
output = speech_recognizer(audio, return_timestamps="char")
self.assertEqual(audio.shape, (74_400,))
self.assertEqual(speech_recognizer.feature_extractor.sampling_rate, 16_000)
# The audio is 74_400 / 16_000 = 4.65s long.
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": " ", "timestamp": (0.62, 0.66)},
{"text": "M", "timestamp": (0.68, 0.7)},
{"text": "A", "timestamp": (0.78, 0.8)},
{"text": "N", "timestamp": (0.84, 0.86)},
{"text": " ", "timestamp": (0.92, 0.98)},
{"text": "S", "timestamp": (1.06, 1.08)},
{"text": "A", "timestamp": (1.14, 1.16)},
{"text": "I", "timestamp": (1.16, 1.18)},
{"text": "D", "timestamp": (1.2, 1.24)},
{"text": " ", "timestamp": (1.24, 1.28)},
{"text": "T", "timestamp": (1.28, 1.32)},
{"text": "O", "timestamp": (1.34, 1.36)},
{"text": " ", "timestamp": (1.38, 1.42)},
{"text": "T", "timestamp": (1.42, 1.44)},
{"text": "H", "timestamp": (1.44, 1.46)},
{"text": "E", "timestamp": (1.46, 1.5)},
{"text": " ", "timestamp": (1.5, 1.56)},
{"text": "U", "timestamp": (1.58, 1.62)},
{"text": "N", "timestamp": (1.64, 1.68)},
{"text": "I", "timestamp": (1.7, 1.72)},
{"text": "V", "timestamp": (1.76, 1.78)},
{"text": "E", "timestamp": (1.84, 1.86)},
{"text": "R", "timestamp": (1.86, 1.9)},
{"text": "S", "timestamp": (1.96, 1.98)},
{"text": "E", "timestamp": (1.98, 2.02)},
{"text": " ", "timestamp": (2.02, 2.06)},
{"text": "S", "timestamp": (2.82, 2.86)},
{"text": "I", "timestamp": (2.94, 2.96)},
{"text": "R", "timestamp": (2.98, 3.02)},
{"text": " ", "timestamp": (3.06, 3.12)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": " ", "timestamp": (3.58, 3.6)},
{"text": "E", "timestamp": (3.66, 3.68)},
{"text": "X", "timestamp": (3.68, 3.7)},
{"text": "I", "timestamp": (3.9, 3.92)},
{"text": "S", "timestamp": (3.94, 3.96)},
{"text": "T", "timestamp": (4.0, 4.02)},
{"text": " ", "timestamp": (4.06, 4.1)},
],
},
)
output = speech_recognizer(audio, return_timestamps="word")
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": "MAN", "timestamp": (0.68, 0.86)},
{"text": "SAID", "timestamp": (1.06, 1.24)},
{"text": "TO", "timestamp": (1.28, 1.36)},
{"text": "THE", "timestamp": (1.42, 1.5)},
{"text": "UNIVERSE", "timestamp": (1.58, 2.02)},
{"text": "SIR", "timestamp": (2.82, 3.02)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": "EXIST", "timestamp": (3.66, 4.02)},
],
},
)
output = speech_recognizer(audio, return_timestamps="word", chunk_length_s=2.0)
self.assertEqual(
output,
{
"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST",
"chunks": [
{"text": "A", "timestamp": (0.6, 0.62)},
{"text": "MAN", "timestamp": (0.68, 0.86)},
{"text": "SAID", "timestamp": (1.06, 1.24)},
{"text": "TO", "timestamp": (1.3, 1.36)},
{"text": "THE", "timestamp": (1.42, 1.48)},
{"text": "UNIVERSE", "timestamp": (1.58, 2.02)},
# Tiny change linked to chunking.
{"text": "SIR", "timestamp": (2.84, 3.02)},
{"text": "I", "timestamp": (3.5, 3.52)},
{"text": "EXIST", "timestamp": (3.66, 4.02)},
],
},
)
# CTC models must specify return_timestamps type - cannot set `return_timestamps=True` blindly
with self.assertRaisesRegex(
ValueError,
"^CTC can either predict character level timestamps, or word level timestamps. "
"Set `return_timestamps='char'` or `return_timestamps='word'` as required.$",
):
_ = speech_recognizer(audio, return_timestamps=True)
@require_torch
@slow
def test_chunking_with_lm(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="patrickvonplaten/wav2vec2-base-100h-with-lm",
chunk_length_s=10.0,
)
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id")
audio = ds[40]["audio"]["array"]
n_repeats = 10
audio = np.tile(audio, n_repeats)
output = speech_recognizer([audio], batch_size=2)
expected_text = "A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats
expected = [{"text": expected_text.strip()}]
self.assertEqual(output, expected)
@require_torch
def test_chunk_iterator(self):
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
inputs = torch.arange(100).long()
ratio = 1
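        # Each item yielded by `chunk_iter` carries the processed "input_values", an "is_last" flag,
        # and a "stride" tuple (chunk_length, left_stride, right_stride) counted in samples.
        # A single 100-sample chunk covering the whole input therefore has stride (100, 0, 0).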
outs = list(chunk_iter(inputs, feature_extractor, 100, 0, 0, ratio))
self.assertEqual(len(outs), 1)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)])
self.assertEqual([o["is_last"] for o in outs], [True])
# two chunks no stride
outs = list(chunk_iter(inputs, feature_extractor, 50, 0, 0, ratio))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(50, 0, 0), (50, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 50), (1, 50)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
# two chunks incomplete last
outs = list(chunk_iter(inputs, feature_extractor, 80, 0, 0, ratio))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(80, 0, 0), (20, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 20)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
        # One chunk only, since the first chunk is also the last: the data that would fall
        # in the right stride is simply marked as non-strided.
        # This test is specifically crafted to trigger a bug where the next chunk would be
        # ignored because all of its data would already be contained in the strided left
        # part of the previous chunk.
outs = list(chunk_iter(inputs, feature_extractor, 105, 5, 5, ratio))
self.assertEqual(len(outs), 1)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)])
self.assertEqual([o["is_last"] for o in outs], [True])
@require_torch
def test_chunk_iterator_stride(self):
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
inputs = torch.arange(100).long()
input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[
"input_values"
]
ratio = 1
outs = list(chunk_iter(inputs, feature_extractor, 100, 20, 10, ratio))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(100, 0, 10), (30, 20, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 100), (1, 30)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
outs = list(chunk_iter(inputs, feature_extractor, 80, 20, 10, ratio))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(80, 0, 10), (50, 20, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 50)])
self.assertEqual([o["is_last"] for o in outs], [False, True])
outs = list(chunk_iter(inputs, feature_extractor, 90, 20, 0, ratio))
self.assertEqual(len(outs), 2)
self.assertEqual([o["stride"] for o in outs], [(90, 0, 0), (30, 20, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 90), (1, 30)])
outs = list(chunk_iter(inputs, feature_extractor, 36, 6, 6, ratio))
self.assertEqual(len(outs), 4)
self.assertEqual([o["stride"] for o in outs], [(36, 0, 6), (36, 6, 6), (36, 6, 6), (28, 6, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 36), (1, 36), (1, 36), (1, 28)])
inputs = torch.LongTensor([i % 2 for i in range(100)])
input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[
"input_values"
]
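        # The un-chunked features are computed once so each strided chunk below can be checked
        # slice-by-slice against the corresponding region of the full feature matrix.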
outs = list(chunk_iter(inputs, feature_extractor, 30, 5, 5, ratio))
self.assertEqual(len(outs), 5)
self.assertEqual([o["stride"] for o in outs], [(30, 0, 5), (30, 5, 5), (30, 5, 5), (30, 5, 5), (20, 5, 0)])
self.assertEqual([o["input_values"].shape for o in outs], [(1, 30), (1, 30), (1, 30), (1, 30), (1, 20)])
self.assertEqual([o["is_last"] for o in outs], [False, False, False, False, True])
# (0, 25)
self.assertEqual(nested_simplify(input_values[:, :30]), nested_simplify(outs[0]["input_values"]))
# (25, 45)
self.assertEqual(nested_simplify(input_values[:, 20:50]), nested_simplify(outs[1]["input_values"]))
# (45, 65)
self.assertEqual(nested_simplify(input_values[:, 40:70]), nested_simplify(outs[2]["input_values"]))
# (65, 85)
self.assertEqual(nested_simplify(input_values[:, 60:90]), nested_simplify(outs[3]["input_values"]))
# (85, 100)
self.assertEqual(nested_simplify(input_values[:, 80:100]), nested_simplify(outs[4]["input_values"]))
@require_torch
def test_stride(self):
speech_recognizer = pipeline(
task="automatic-speech-recognition",
model="hf-internal-testing/tiny-random-wav2vec2",
)
waveform = np.tile(np.arange(1000, dtype=np.float32), 10)
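        # The dict input form accepts a "stride" tuple (left, right) in samples: that audio is still
        # fed to the model as context, but the corresponding predictions are dropped from the text.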
output = speech_recognizer({"raw": waveform, "stride": (0, 0), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "OB XB B EB BB B EB B OB X"})
        # 0 effective ids: only the middle part between the strides is kept, which is empty here.
output = speech_recognizer({"raw": waveform, "stride": (5000, 5000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": ""})
# Only 1 arange.
output = speech_recognizer({"raw": waveform, "stride": (0, 9000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "OB"})
# 2nd arange
output = speech_recognizer({"raw": waveform, "stride": (1000, 8000), "sampling_rate": 16_000})
self.assertEqual(output, {"text": "XB"})
@slow
@require_torch_accelerator
def test_slow_unfinished_sequence(self):
from transformers import GenerationConfig
pipe = pipeline(
"automatic-speech-recognition",
model="vasista22/whisper-hindi-large-v2",
device=torch_device,
)
# Original model wasn't trained with timestamps and has incorrect generation config
pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2")
audio = hf_hub_download("Narsil/asr_dummy", filename="hindi.ogg", repo_type="dataset")
out = pipe(
audio,
return_timestamps=True,
)
self.assertEqual(
out,
{
"chunks": [
{"text": "", "timestamp": (18.94, 0.02)},
{"text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", "timestamp": (None, None)},
],
"text": "मिर्ची में कितने विभिन्न प्रजातियां हैं",
},
)
def require_ffmpeg(test_case):
"""
Decorator marking a test that requires FFmpeg.
These tests are skipped when FFmpeg isn't installed.
"""
import subprocess
try:
subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL)
return test_case
except Exception:
return unittest.skip("test requires ffmpeg")(test_case)
def bytes_iter(chunk_size, chunks):
for i in range(chunks):
yield bytes(range(i * chunk_size, (i + 1) * chunk_size))
@require_ffmpeg
class AudioUtilsTest(unittest.TestCase):
def test_chunk_bytes_iter_too_big(self):
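        # `chunk_bytes_iter` re-chunks a raw byte stream into fixed-size windows, yielding dicts with
        # the "raw" bytes and a "stride" tuple (left, right) of overlapping byte counts. Here the
        # requested size (10) is larger than the whole input (6 bytes), so one short chunk comes out.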
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 10, stride=(0, 0)))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0)})
with self.assertRaises(StopIteration):
next(iter_)
def test_chunk_bytes_iter(self):
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(0, 0)))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0)})
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (0, 0)})
with self.assertRaises(StopIteration):
next(iter_)
def test_chunk_bytes_iter_stride(self):
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(1, 1)))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 1)})
self.assertEqual(next(iter_), {"raw": b"\x01\x02\x03", "stride": (1, 1)})
self.assertEqual(next(iter_), {"raw": b"\x02\x03\x04", "stride": (1, 1)})
        # The stream is finished, but chunk_bytes_iter doesn't know it yet.
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 1)})
self.assertEqual(next(iter_), {"raw": b"\x04\x05", "stride": (1, 0)})
with self.assertRaises(StopIteration):
next(iter_)
def test_chunk_bytes_iter_stride_stream(self):
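        # With stream=True the same window can be yielded several times while it is still filling up:
        # incomplete emissions are flagged with "partial": True, the final one with "partial": False.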
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 5, stride=(1, 1), stream=True))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True})
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False})
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 0), "partial": False})
with self.assertRaises(StopIteration):
next(iter_)
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 5, stride=(1, 1), stream=True))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True})
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False})
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05\x06\x07", "stride": (1, 1), "partial": False})
self.assertEqual(next(iter_), {"raw": b"\x06\x07\x08", "stride": (1, 0), "partial": False})
with self.assertRaises(StopIteration):
next(iter_)
iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 10, stride=(1, 1), stream=True))
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True})
self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0), "partial": True})
self.assertEqual(
next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": True}
)
self.assertEqual(
next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": False}
)
with self.assertRaises(StopIteration):
next(iter_)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_image_classification.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
PreTrainedTokenizerBase,
is_vision_available,
)
from transformers.pipelines import ImageClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_torch_or_tf
@require_vision
class ImageClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2)
examples = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
return image_classifier, examples
def run_pipeline_test(self, image_classifier, examples):
outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
self.assertEqual(
outputs,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
import datasets
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
# Accepts URL + PIL.Image + lists
outputs = image_classifier(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
)
self.assertEqual(
outputs,
[
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
],
)
@require_torch
def test_small_model_pt(self):
small_model = "hf-internal-testing/tiny-random-vit"
image_classifier = pipeline("image-classification", model=small_model)
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
)
outputs = image_classifier(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
],
)
@require_tf
def test_small_model_tf(self):
small_model = "hf-internal-testing/tiny-random-vit"
image_classifier = pipeline("image-classification", model=small_model, framework="tf")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
)
outputs = image_classifier(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
],
)
def test_custom_tokenizer(self):
tokenizer = PreTrainedTokenizerBase()
        # Assert that the pipeline can be initialized with a tokenizer that is not in any mapping
image_classifier = pipeline(
"image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer
)
self.assertIs(image_classifier.tokenizer, tokenizer)
@slow
@require_torch
def test_perceiver(self):
        # Perceiver is not tested by `run_pipeline_test` properly.
        # That is because the type of the feature_extractor and of the model preprocessor needs to be
        # kept in sync, which is not the case in the current design.
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.4385, "label": "tabby, tabby cat"},
{"score": 0.321, "label": "tiger cat"},
{"score": 0.0502, "label": "Egyptian cat"},
{"score": 0.0137, "label": "crib, cot"},
{"score": 0.007, "label": "radiator"},
],
)
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.5658, "label": "tabby, tabby cat"},
{"score": 0.1309, "label": "tiger cat"},
{"score": 0.0722, "label": "Egyptian cat"},
{"score": 0.0707, "label": "remote control, remote"},
{"score": 0.0082, "label": "computer keyboard, keypad"},
],
)
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.3022, "label": "tabby, tabby cat"},
{"score": 0.2362, "label": "Egyptian cat"},
{"score": 0.1856, "label": "tiger cat"},
{"score": 0.0324, "label": "remote control, remote"},
{"score": 0.0096, "label": "quilt, comforter, comfort, puff"},
],
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
object_detector = pipeline(
"zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
)
examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def run_pipeline_test(self, object_detector, examples):
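        # threshold=0.0 keeps every candidate box, so the structural check below can expect at least one detection.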
outputs = object_detector(examples[0], threshold=0.0)
n = len(outputs)
self.assertGreater(n, 0)
self.assertEqual(
outputs,
[
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
}
for i in range(n)
],
)
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF")
def test_small_model_tf(self):
pass
@require_torch
def test_small_model_pt(self):
object_detector = pipeline(
"zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
)
outputs = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png",
candidate_labels=["cat", "remote", "couch"],
threshold=0.64,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
],
)
outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
],
threshold=0.64,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
],
)
@require_torch
@slow
def test_large_model_pt(self):
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
)
outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
],
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
],
)
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF")
def test_large_model_tf(self):
pass
@require_torch
@slow
def test_threshold(self):
threshold = 0.2
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
threshold=threshold,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
],
)
@require_torch
@slow
def test_top_k(self):
top_k = 2
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
top_k=top_k,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
],
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLIP would be there for now.
# model_mapping = {CLIPConfig: CLIPModel}
# def get_test_pipeline(self, model, tokenizer, processor):
# if tokenizer is None:
# # Side effect of no Fast Tokenizer class for these models, so skipping
# # But the slow tokenizer tests should still run as they're quite small
# self.skipTest("No tokenizer available")
# return
# # return None, None
# image_classifier = ZeroShotImageClassificationPipeline(
# model=model, tokenizer=tokenizer, feature_extractor=processor
# )
# # test with a raw image
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# return image_classifier, [image, image2]
# def run_pipeline_test(self, pipe, examples):
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# outputs = pipe(image, candidate_labels=["A", "B"])
# self.assertEqual(outputs, {"text": ANY(str)})
# # Batching
# outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"])
@require_torch
def test_small_model_pt(self):
image_classifier = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["a", "b", "c"])
# The floating-point scores are so close that we run into precision limits, so the order is not guaranteed
# across Python and torch versions.
self.assertIn(
nested_simplify(output),
[
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
[{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}],
],
)
output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(output),
# Pipeline outputs are supposed to be deterministic, so we could
# in theory have the real values "A", "B", "C" instead of ANY(str).
# However, in this particular case the floating-point scores are so
# close that we run into precision limits and the order is no longer
# guaranteed with batching.
[
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
],
)
@require_tf
def test_small_model_tf(self):
image_classifier = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(output),
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
)
output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(output),
# Pipeline outputs are supposed to be deterministic, so we could
# in theory have the real values "A", "B", "C" instead of ANY(str).
# However, in this particular case the floating-point scores are so
# close that we run into precision limits and the order is no longer
# guaranteed with batching.
[
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
[
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
{"score": 0.333, "label": ANY(str)},
],
],
)
@slow
@require_torch
def test_large_model_pt(self):
image_classifier = pipeline(
task="zero-shot-image-classification",
model="openai/clip-vit-base-patch32",
)
# This is an image of 2 cats with remotes and no planes
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(output),
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
)
output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5,
)
@slow
@require_tf
def test_large_model_tf(self):
image_classifier = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
)
# This is an image of 2 cats with remotes and no planes
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(output),
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
)
output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5,
)
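# The batched small-model tests above cannot rely on label order because the tiny
# checkpoint produces near-identical scores. A minimal, order-independent comparison
# sketch (hypothetical helper, not used by the tests above):
def _sorted_by_label(predictions):
    # predictions: list of {"score": float, "label": str} dicts as returned by the pipeline
    return sorted(predictions, key=lambda prediction: prediction["label"])
# e.g. assertEqual(_sorted_by_label(output), _sorted_by_label(expected)) ignores ordering.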
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_table_question_answering.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
AutoModelForTableQuestionAnswering,
AutoTokenizer,
TableQuestionAnsweringPipeline,
TFAutoModelForTableQuestionAnswering,
is_torch_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_pandas,
require_tensorflow_probability,
require_tf,
require_torch,
slow,
)
if is_torch_available():
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12
else:
is_torch_greater_or_equal_than_1_12 = False
@is_pipeline_test
class TQAPipelineTests(unittest.TestCase):
# Putting it there for consistency, but TQA does not have a fast tokenizer,
# which is needed to generate automatic tests
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
@require_tensorflow_probability
@require_pandas
@require_tf
@require_torch
def test_small_model_tf(self):
model_id = "lysandre/tiny-tapas-random-wtq"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
self.assertIsInstance(model.config.aggregation_labels, dict)
self.assertIsInstance(model.config.no_aggregation_label_index, int)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+")
@require_torch
def test_small_model_pt(self):
model_id = "lysandre/tiny-tapas-random-wtq"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
self.assertIsInstance(model.config.aggregation_labels, dict)
self.assertIsInstance(model.config.no_aggregation_label_index, int)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+")
@require_torch
def test_slow_tokenizer_sqa_pt(self):
model_id = "lysandre/tiny-tapas-random-sqa"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_tf
@require_tensorflow_probability
@require_pandas
@require_torch
def test_slow_tokenizer_sqa_tf(self):
model_id = "lysandre/tiny-tapas-random-sqa"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+")
@slow
@require_torch
def test_integration_wtq_pt(self):
table_querier = pipeline("table-question-answering")
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@slow
@require_tensorflow_probability
@require_pandas
def test_integration_wtq_tf(self):
model_id = "google/tapas-base-finetuned-wtq"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer)
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+")
@slow
@require_torch
def test_integration_sqa_pt(self):
table_querier = pipeline(
"table-question-answering",
model="google/tapas-base-finetuned-sqa",
tokenizer="google/tapas-base-finetuned-sqa",
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
@slow
@require_tensorflow_probability
@require_pandas
def test_integration_sqa_tf(self):
model_id = "google/tapas-base-finetuned-sqa"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = pipeline(
"table-question-answering",
model=model,
tokenizer=tokenizer,
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
@slow
@require_torch
def test_large_model_pt_tapex(self):
model_id = "microsoft/tapex-large-finetuned-wtq"
table_querier = pipeline(
"table-question-answering",
model=model_id,
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = [
"How many movies has George Clooney played in?",
"How old is Mr Clooney ?",
"What's the date of birth of Leonardo ?",
]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": " 69"},
{"answer": " 59"},
{"answer": " 10 june 1996"},
]
self.assertListEqual(results, expected_results)
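# A minimal usage sketch for the `sequential` flag exercised above (not collected by the
# test suite; assumes network access and reuses the "google/tapas-base-finetuned-sqa"
# checkpoint from the SQA integration tests):
def _example_sequential_table_qa():
    from transformers import pipeline

    table_querier = pipeline("table-question-answering", model="google/tapas-base-finetuned-sqa")
    table = {
        "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        "Age": ["56", "45", "59"],
    }
    # With sequential=True each query is answered conditioned on the previous ones,
    # which is what SQA-style (conversational) checkpoints expect.
    return table_querier(table=table, query=["How old is George Clooney?", "And Brad Pitt?"], sequential=True)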
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_text.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests
from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
@is_pipeline_test
@require_vision
class ImageToTextPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor)
examples = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
return pipe, examples
def run_pipeline_test(self, pipe, examples):
outputs = pipe(examples)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}],
],
)
@require_tf
def test_small_model_tf(self):
pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = pipe(image)
self.assertEqual(
outputs,
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
},
],
)
outputs = pipe([image, image])
self.assertEqual(
outputs,
[
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
}
],
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
}
],
],
)
outputs = pipe(image, max_new_tokens=1)
self.assertEqual(
outputs,
[{"generated_text": "growth"}],
)
@require_torch
def test_small_model_pt(self):
pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = pipe(image)
self.assertEqual(
outputs,
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
},
],
)
outputs = pipe([image, image])
self.assertEqual(
outputs,
[
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
}
],
[
{
"generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO"
}
],
],
)
@require_torch
def test_small_model_pt_conditional(self):
pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
prompt = "a photo of"
outputs = pipe(image, prompt=prompt)
self.assertTrue(outputs[0]["generated_text"].startswith(prompt))
@slow
@require_torch
def test_large_model_pt(self):
pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = pipe(image)
self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}])
outputs = pipe([image, image])
self.assertEqual(
outputs,
[
[{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
[{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
],
)
@slow
@require_torch
def test_generation_pt_blip(self):
pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png"
image = Image.open(requests.get(url, stream=True).raw)
outputs = pipe(image)
self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}])
@slow
@require_torch
def test_generation_pt_git(self):
pipe = pipeline("image-to-text", model="microsoft/git-base-coco")
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png"
image = Image.open(requests.get(url, stream=True).raw)
outputs = pipe(image)
self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}])
@slow
@require_torch
def test_conditional_generation_pt_blip(self):
pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "a photography of"
outputs = pipe(image, prompt=prompt)
self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}])
with self.assertRaises(ValueError):
outputs = pipe([image, image], prompt=[prompt, prompt])
@slow
@require_torch
def test_conditional_generation_pt_git(self):
pipe = pipeline("image-to-text", model="microsoft/git-base-coco")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "a photo of a"
outputs = pipe(image, prompt=prompt)
self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}])
with self.assertRaises(ValueError):
outputs = pipe([image, image], prompt=[prompt, prompt])
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`."
)
@slow
@require_torch
def test_conditional_generation_pt_pix2struct(self):
pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud"
outputs = pipe(image, prompt=prompt)
self.assertEqual(outputs, [{"generated_text": "ash cloud"}])
with self.assertRaises(ValueError):
outputs = pipe([image, image], prompt=[prompt, prompt])
@slow
@require_tf
def test_large_model_tf(self):
pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
outputs = pipe(image)
self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}])
outputs = pipe([image, image])
self.assertEqual(
outputs,
[
[{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
[{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}],
],
)
@slow
@require_torch
def test_conditional_generation_llava(self):
pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
"<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:"
)
outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
self.assertEqual(
outputs,
[
{
"generated_text": "<image> \nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava"
}
],
)
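# A minimal usage sketch for prompted captioning as exercised above (not collected by the
# test suite; assumes network access and reuses the "Salesforce/blip-image-captioning-base"
# checkpoint from the slow tests):
def _example_prompted_captioning(image_or_url):
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    # `prompt` prefixes the generated caption; `generate_kwargs` are forwarded to `generate()`.
    return captioner(image_or_url, prompt="a photography of", generate_kwargs={"max_new_tokens": 20})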
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_image_segmentation.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from typing import Dict
import datasets
import numpy as np
import requests
from datasets import load_dataset
from huggingface_hub.utils import insecure_hashlib
from transformers import (
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
AutoImageProcessor,
AutoModelForImageSegmentation,
AutoModelForInstanceSegmentation,
DetrForSegmentation,
ImageSegmentationPipeline,
MaskFormerForInstanceSegmentation,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*args, **kwargs):
pass
def hashimage(image: Image) -> str:
m = insecure_hashlib.md5(image.tobytes())
return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
npimg = np.array(mask)
white_pixels = (npimg == 255).sum()
shape = npimg.shape
return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape}
def mask_to_test_readable_only_shape(mask: Image) -> Dict:
npimg = np.array(mask)
shape = npimg.shape
return {"shape": shape}
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ImageSegmentationPipelineTests(unittest.TestCase):
model_mapping = dict(
(list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else [])
+ (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else [])
+ (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else [])
)
def get_test_pipeline(self, model, tokenizer, processor):
image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def run_pipeline_test(self, image_segmenter, examples):
outputs = image_segmenter(
"./tests/fixtures/tests_samples/COCO/000000039769.png",
threshold=0.0,
mask_threshold=0,
overlap_mask_area_threshold=0,
)
self.assertIsInstance(outputs, list)
n = len(outputs)
if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)):
# Instance segmentation models (MaskFormer and DETR) have a slot for the null class
# and can output nothing even with a low threshold
self.assertGreaterEqual(n, 0)
else:
self.assertGreaterEqual(n, 1)
# XXX: PIL.Image implements __eq__ which bypasses ANY, so we swap the comparison order
# to make it work
self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs)
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
# RGBA
outputs = image_segmenter(dataset[0]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
m = len(outputs)
self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
# LA
outputs = image_segmenter(dataset[1]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
m = len(outputs)
self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
# L
outputs = image_segmenter(dataset[2]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
m = len(outputs)
self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
if isinstance(image_segmenter.model, DetrForSegmentation):
# We need to test batch_size with images of the same size.
# DETR doesn't normalize image sizes, so we can get
# 800x800 or 800x1200 inputs, which cannot be batched naively.
# We simply bail out on this.
batch_size = 1
else:
batch_size = 2
# 5 times the same image so the output shape is predictable
batch = [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
outputs = image_segmenter(
batch,
threshold=0.0,
mask_threshold=0,
overlap_mask_area_threshold=0,
batch_size=batch_size,
)
self.assertEqual(len(batch), len(outputs))
self.assertEqual(len(outputs[0]), n)
self.assertEqual(
[
[{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n,
[{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n,
[{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n,
[{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n,
[{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n,
],
outputs,
f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}",
)
@require_tf
@unittest.skip("Image segmentation not implemented in TF")
def test_small_model_tf(self):
pass
@require_torch
def test_small_model_pt_no_panoptic(self):
model_id = "hf-internal-testing/tiny-random-mobilevit"
# The default task is `image-classification`, so we need to override it
pipe = pipeline(task="image-segmentation", model=model_id)
# This model supports neither `instance` nor `panoptic`,
# so we should error out
with self.assertRaises(ValueError) as e:
pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic")
self.assertEqual(
str(e.exception),
"Subtask panoptic is not supported for model <class"
" 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>",
)
with self.assertRaises(ValueError) as e:
pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance")
self.assertEqual(
str(e.exception),
"Subtask instance is not supported for model <class"
" 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>",
)
@require_torch
def test_small_model_pt(self):
model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic"
model = AutoModelForImageSegmentation.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
image_segmenter = ImageSegmentationPipeline(
model=model,
image_processor=image_processor,
subtask="panoptic",
threshold=0.0,
mask_threshold=0.0,
overlap_mask_area_threshold=0.0,
)
outputs = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg",
)
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
# This is extremely brittle, and those values are made specific for the CI.
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.004,
"label": "LABEL_215",
"mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200},
},
],
)
outputs = image_segmenter(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
)
for output in outputs:
for o in output:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{
"score": 0.004,
"label": "LABEL_215",
"mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200},
},
],
[
{
"score": 0.004,
"label": "LABEL_215",
"mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200},
},
],
],
)
output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance")
for o in output:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(output, decimals=4),
[
{
"score": 0.004,
"label": "LABEL_215",
"mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200},
},
],
)
# This may be surprising to the reader:
# the `panoptic` subtask returns only LABEL_215, while the `semantic` call below returns 3 labels.
#
output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic")
output_masks = [o["mask"] for o in output]
# page links (to visualize)
expected_masks = [
"https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png",
"https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png",
"https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png",
]
# actual links to get files
expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks]
expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks]
# Convert masks to numpy array
output_masks = [np.array(x) for x in output_masks]
expected_masks = [np.array(x) for x in expected_masks]
self.assertEqual(output_masks[0].shape, expected_masks[0].shape)
self.assertEqual(output_masks[1].shape, expected_masks[1].shape)
self.assertEqual(output_masks[2].shape, expected_masks[2].shape)
# With un-trained tiny random models, the output `logits` tensor is very likely to contain many values
# close to each other, which causes `argmax` to give quite different results when running the test in two
# different environments. We use a lower threshold `0.9` here to avoid flakiness.
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9)
self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9)
self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9)
for o in output:
o["mask"] = mask_to_test_readable_only_shape(o["mask"])
self.maxDiff = None
self.assertEqual(
nested_simplify(output, decimals=4),
[
{
"label": "LABEL_88",
"mask": {"shape": (480, 640)},
"score": None,
},
{
"label": "LABEL_101",
"mask": {"shape": (480, 640)},
"score": None,
},
{
"label": "LABEL_215",
"mask": {"shape": (480, 640)},
"score": None,
},
],
)
@require_torch
def test_small_model_pt_semantic(self):
model_id = "hf-internal-testing/tiny-random-beit-pipeline"
image_segmenter = pipeline(model=model_id)
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg")
for o in outputs:
# shortening by hashing
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": None,
"label": "LABEL_0",
"mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714},
},
{
"score": None,
"label": "LABEL_1",
"mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486},
},
],
)
@require_torch
@slow
def test_integration_torch_image_segmentation(self):
model_id = "facebook/detr-resnet-50-panoptic"
image_segmenter = pipeline(
"image-segmentation",
model=model_id,
threshold=0.0,
overlap_mask_area_threshold=0.0,
)
outputs = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg",
)
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9094,
"label": "blanket",
"mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617},
},
{
"score": 0.9941,
"label": "cat",
"mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185},
},
{
"score": 0.9987,
"label": "remote",
"mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182},
},
{
"score": 0.9995,
"label": "remote",
"mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275},
},
{
"score": 0.9722,
"label": "couch",
"mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380},
},
{
"score": 0.9994,
"label": "cat",
"mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561},
},
],
)
outputs = image_segmenter(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
)
# Shortening by hashing
for output in outputs:
for o in output:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{
"score": 0.9094,
"label": "blanket",
"mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617},
},
{
"score": 0.9941,
"label": "cat",
"mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185},
},
{
"score": 0.9987,
"label": "remote",
"mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182},
},
{
"score": 0.9995,
"label": "remote",
"mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275},
},
{
"score": 0.9722,
"label": "couch",
"mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380},
},
{
"score": 0.9994,
"label": "cat",
"mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561},
},
],
[
{
"score": 0.9094,
"label": "blanket",
"mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617},
},
{
"score": 0.9941,
"label": "cat",
"mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185},
},
{
"score": 0.9987,
"label": "remote",
"mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182},
},
{
"score": 0.9995,
"label": "remote",
"mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275},
},
{
"score": 0.9722,
"label": "couch",
"mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380},
},
{
"score": 0.9994,
"label": "cat",
"mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561},
},
],
],
)
@require_torch
@slow
def test_threshold(self):
model_id = "facebook/detr-resnet-50-panoptic"
image_segmenter = pipeline("image-segmentation", model=model_id)
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999)
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9995,
"label": "remote",
"mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789},
},
{
"score": 0.9994,
"label": "cat",
"mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411},
},
],
)
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5)
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9941,
"label": "cat",
"mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185},
},
{
"score": 0.9987,
"label": "remote",
"mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182},
},
{
"score": 0.9995,
"label": "remote",
"mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275},
},
{
"score": 0.9722,
"label": "couch",
"mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380},
},
{
"score": 0.9994,
"label": "cat",
"mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561},
},
],
)
@require_torch
@slow
def test_maskformer(self):
threshold = 0.8
model_id = "facebook/maskformer-swin-base-ade"
model = AutoModelForInstanceSegmentation.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor)
image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
file = image[0]["file"]
outputs = image_segmenter(file, threshold=threshold)
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9974,
"label": "wall",
"mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252},
},
{
"score": 0.949,
"label": "house",
"mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177},
},
{
"score": 0.9995,
"label": "grass",
"mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444},
},
{
"score": 0.9976,
"label": "tree",
"mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944},
},
{
"score": 0.8239,
"label": "plant",
"mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136},
},
{
"score": 0.9942,
"label": "road, route",
"mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941},
},
{
"score": 1.0,
"label": "sky",
"mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802},
},
],
)
@require_torch
@slow
def test_oneformer(self):
image_segmenter = pipeline(model="shi-labs/oneformer_ade20k_swin_tiny")
image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
file = image[0]["file"]
outputs = image_segmenter(file, threshold=0.99)
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9981,
"label": "grass",
"mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)},
},
{
"score": 0.9992,
"label": "sky",
"mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)},
},
],
)
# Different task
outputs = image_segmenter(file, threshold=0.99, subtask="instance")
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": 0.9991,
"label": "sky",
"mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)},
},
{
"score": 0.9981,
"label": "grass",
"mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)},
},
],
)
# Different task
outputs = image_segmenter(file, subtask="semantic")
# Shortening by hashing
for o in outputs:
o["mask"] = mask_to_test_readable(o["mask"])
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{
"score": None,
"label": "wall",
"mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)},
},
{
"score": None,
"label": "building",
"mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)},
},
{
"score": None,
"label": "sky",
"mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)},
},
{
"score": None,
"label": "tree",
"mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)},
},
{
"score": None,
"label": "road, route",
"mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)},
},
{
"score": None,
"label": "grass",
"mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)},
},
{
"score": None,
"label": "plant",
"mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)},
},
{
"score": None,
"label": "house",
"mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)},
},
],
)
def test_save_load(self):
model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic"
model = AutoModelForImageSegmentation.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
image_segmenter = pipeline(
task="image-segmentation",
model=model,
image_processor=image_processor,
)
with tempfile.TemporaryDirectory() as tmpdirname:
image_segmenter.save_pretrained(tmpdirname)
pipeline(task="image-segmentation", model=tmpdirname)
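# Illustrative sketch only, not the `mask_to_test_readable` helper actually used by the
# tests above: the expected outputs compare masks through a summary of the form
# {"hash": ..., "shape": ..., "white_pixels": ...}. One plausible way to build such a
# summary from a PIL mask is shown below; the helper name, hash function and hash prefix
# length are assumptions, not the library's implementation.
def _example_mask_summary(mask):
    import hashlib
    import numpy as np
    arr = np.asarray(mask)
    return {
        "hash": hashlib.sha1(arr.tobytes()).hexdigest()[:10],
        "shape": arr.shape,
        "white_pixels": int((arr == 255).sum()),
    }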
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLAP would be there for now.
# model_mapping = {CLAPConfig: CLAPModel}
@require_torch
def test_small_model_pt(self):
audio_classifier = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
)
dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]
output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(output),
[{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
)
@unittest.skip("No models are available in TF")
def test_small_model_tf(self):
pass
@slow
@require_torch
def test_large_model_pt(self):
audio_classifier = pipeline(
task="zero-shot-audio-classification",
model="laion/clap-htsat-unfused",
)
# This is an audio of a dog
dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]
output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(output),
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
)
output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5,
)
output = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
)
self.assertEqual(
nested_simplify(output),
[
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5,
)
@unittest.skip("No models are available in TF")
def test_large_model_tf(self):
pass
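# Rough sketch of the scoring idea behind zero-shot audio classification with CLAP-style
# models: similarities between one audio embedding and one text embedding per candidate
# label are softmaxed, which is why the scores above sum to ~1 across the candidate
# labels. Plain numpy and purely illustrative; the real pipeline computes the embeddings
# with the model, and the helper name is hypothetical.
def _example_zero_shot_scores(audio_embedding, text_embeddings, candidate_labels):
    import numpy as np
    audio = np.asarray(audio_embedding, dtype=np.float64)
    texts = np.asarray(text_embeddings, dtype=np.float64)
    # Cosine similarity between the audio embedding and each label embedding.
    sims = texts @ audio / (np.linalg.norm(texts, axis=1) * np.linalg.norm(audio))
    # Stable softmax over the candidate labels.
    exp = np.exp(sims - sims.max())
    probs = exp / exp.sum()
    order = np.argsort(-probs)
    return [{"score": float(probs[i]), "label": candidate_labels[i]} for i in order]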
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_question_answering.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
LxmertConfig,
QuestionAnsweringPipeline,
)
from transformers.data.processors.squad import SquadExample
from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_or_tf,
slow,
)
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class QAPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline(self, model, tokenizer, processor):
if isinstance(model.config, LxmertConfig):
            # This is a bimodal model; we need to find a more consistent way
            # to switch on those models.
return None, None
question_answerer = QuestionAnsweringPipeline(model, tokenizer)
examples = [
{"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
{"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."},
]
return question_answerer, examples
def run_pipeline_test(self, question_answerer, _):
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
handle_impossible_answer=True,
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"],
context="HuggingFace was founded in Paris.",
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
outputs = question_answerer(
question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"],
context=[
"HuggingFace is a startup based in New-York",
"HuggingFace is a startup founded in Paris",
],
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
with self.assertRaises(ValueError):
question_answerer(question="", context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question=None, context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context="")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context=None)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20
)
self.assertEqual(
outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)]
)
        # Very long contexts require multiple features
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
# Using batch is OK
if question_answerer.tokenizer.pad_token_id is None:
question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id
new_outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2
)
self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs))
@require_torch
def test_small_model_pt(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_iterator(self):
# https://github.com/huggingface/transformers/issues/18510
pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt")
def data():
for i in range(10):
yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}
for outputs in pipe(data()):
self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_torch
def test_small_model_pt_softmax_trick(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
real_postprocess = question_answerer.postprocess
        # Tweak start and stop to make sure we encounter the softmax logits
        # bug (a stable-softmax sketch illustrating the fix is at the end of this file).
def ensure_large_logits_postprocess(
model_outputs,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
):
for output in model_outputs:
output["start"] = output["start"] * 1e6
output["end"] = output["end"] * 1e6
return real_postprocess(
model_outputs,
top_k=top_k,
handle_impossible_answer=handle_impossible_answer,
max_answer_len=max_answer_len,
)
question_answerer.postprocess = ensure_large_logits_postprocess
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"})
@slow
@require_torch
def test_small_model_japanese(self):
question_answerer = pipeline(
"question-answering",
model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head",
)
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip
# Wrong answer, the whole text is identified as one "word" since the tokenizer does not include
# a pretokenizer
self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip
# Disable word alignment
output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip
self.assertEqual(
nested_simplify(output),
{"score": 1.0, "start": 15, "end": 18, "answer": "教科書"},
)
@slow
@require_torch
def test_small_model_long_context_cls_slow(self):
question_answerer = pipeline(
"question-answering",
model="deepset/roberta-base-squad2",
handle_impossible_answer=True,
max_seq_length=512,
)
outputs = question_answerer(
question="What country is Paris the capital of?",
context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""",
)
self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""})
@require_tf
def test_small_model_tf(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.011, "start": 0, "end": 11, "answer": "HuggingFace"})
@slow
@require_torch
def test_large_model_pt(self):
question_answerer = pipeline(
"question-answering",
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@slow
@require_torch
def test_large_model_issue(self):
qa_pipeline = pipeline(
"question-answering",
model="mrm8488/bert-multi-cased-finetuned-xquadv1",
)
outputs = qa_pipeline(
{
"context": (
"Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's"
" order from August this year that had remanded him in police custody for a week in a multi-crore"
" loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud"
" case and some related matters being probed by the CBI and Enforcement Directorate. A single"
" bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on"
" October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special"
" court's order permitting the CBI's request for police custody on August 14 was illegal and in"
" breach of the due process of law. Therefore, his police custody and subsequent judicial custody"
" in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special"
" court's order dated August 14. As per his plea, in August this year, the CBI had moved two"
" applications before the special court, one seeking permission to arrest Kapoor, who was already"
" in judicial custody at the time in another case, and the other, seeking his police custody."
" While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the"
" central agency's plea for his custody. Kapoor, however, said in his plea that before filing an"
" application for his arrest, the CBI had not followed the process of issuing him a notice under"
" Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken"
" prior sanction as mandated under section 17 A of the Prevention of Corruption Act for"
" prosecuting him. The special court, however, had said in its order at the time that as Kapoor"
" was already in judicial custody in another case and was not a free man the procedure mandated"
" under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of"
" appearance was concerned. ADVERTISING It had also said that case records showed that the"
" investigating officer had taken an approval from a managing director of Yes Bank before"
" beginning the proceedings against Kapoor and such a permission was a valid sanction. However,"
" Kapoor in his plea said that the above order was bad in law and sought that it be quashed and"
" set aside. The law mandated that if initial action was not in consonance with legal procedures,"
" then all subsequent actions must be held as illegal, he said, urging the High Court to declare"
" the CBI remand and custody and all subsequent proceedings including the further custody as"
" illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee"
" Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee"
" has stated that she is a resident of the United Kingdom and is unable to travel to India owing"
" to restrictions imposed due to the COVID-19 pandemic. According to the CBI, in the present case,"
" Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused"
" Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was"
" not eligible for the same"
),
"question": "Is this person invovled in fraud?",
}
)
self.assertEqual(
nested_simplify(outputs),
{"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261},
)
@slow
@require_torch
def test_large_model_course(self):
question_answerer = pipeline("question-answering")
long_context = """
🤗 Transformers: State of the Art NLP
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction,
question answering, summarization, translation, text generation and more in over 100 languages.
Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and
then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and
can be modified to enable quick research experiments.
Why should I use transformers?
1. Easy-to-use state-of-the-art models:
- High performance on NLU and NLG tasks.
- Low barrier to entry for educators and practitioners.
- Few user-facing abstractions with just three classes to learn.
- A unified API for using all our pretrained models.
- Lower compute costs, smaller carbon footprint:
2. Researchers can share trained models instead of always retraining.
- Practitioners can reduce compute time and production costs.
- Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.
3. Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code.
- Move a single model between TF2.0/PyTorch frameworks at will.
- Seamlessly pick the right framework for training, evaluation and production.
4. Easily customize a model or an example to your needs:
- We provide examples for each architecture to reproduce the results published by its original authors.
- Model internals are exposed as consistently as possible.
- Model files can be used independently of the library for quick experiments.
🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration
between them. It's straightforward to train your models with one before loading them for inference with the other.
"""
question = "Which deep learning libraries back 🤗 Transformers?"
outputs = question_answerer(question=question, context=long_context)
self.assertEqual(
nested_simplify(outputs),
{"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892},
)
@slow
@require_tf
def test_large_model_tf(self):
question_answerer = pipeline("question-answering", framework="tf")
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@require_torch_or_tf
class QuestionAnsweringArgumentHandlerTests(unittest.TestCase):
def test_argument_handler(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
normalized = qa(Q, C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=[Q, Q], context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa({"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X=[{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(data={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
with self.assertRaises(KeyError):
qa({"context": C})
with self.assertRaises(KeyError):
qa({"question": Q})
with self.assertRaises(KeyError):
qa([{"context": C}])
with self.assertRaises(ValueError):
qa(None, C)
with self.assertRaises(ValueError):
qa("", C)
with self.assertRaises(ValueError):
qa(Q, None)
with self.assertRaises(ValueError):
qa(Q, "")
with self.assertRaises(ValueError):
qa(question=None, context=C)
with self.assertRaises(ValueError):
qa(question="", context=C)
with self.assertRaises(ValueError):
qa(question=Q, context=None)
with self.assertRaises(ValueError):
qa(question=Q, context="")
with self.assertRaises(ValueError):
qa({"question": None, "context": C})
with self.assertRaises(ValueError):
qa({"question": "", "context": C})
with self.assertRaises(ValueError):
qa({"question": Q, "context": None})
with self.assertRaises(ValueError):
qa({"question": Q, "context": ""})
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": None, "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": "", "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": None}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": ""}])
with self.assertRaises(ValueError):
qa(question={"This": "Is weird"}, context="This is a context")
with self.assertRaises(ValueError):
qa(question=[Q, Q], context=[C, C, C])
with self.assertRaises(ValueError):
qa(question=[Q, Q, Q], context=[C, C])
def test_argument_handler_old_format(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
# Backward compatibility for this
normalized = qa(question=[Q, Q], context=[C, C])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling_odd(self):
qa = QuestionAnsweringArgumentHandler()
with self.assertRaises(ValueError):
qa(None)
with self.assertRaises(ValueError):
qa(Y=None)
with self.assertRaises(ValueError):
qa(1)
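# Purely illustrative: `test_small_model_pt_softmax_trick` above blows the start/end
# logits up by 1e6 to check that the pipeline's probability computation does not
# overflow. The standard fix is the max-subtraction ("stable softmax") trick sketched
# here with numpy; this is not the pipeline's actual code and the helper name is made up.
def _example_stable_softmax(logits):
    import numpy as np
    logits = np.asarray(logits, dtype=np.float64)
    # Subtracting the row max leaves the result unchanged mathematically but keeps
    # np.exp from overflowing for very large logits.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)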
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_fill_mask.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
backend_empty_cache,
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def tearDown(self):
super().tearDown()
        # Clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
backend_empty_cache(torch_device)
@require_tf
def test_small_model_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
],
)
@require_torch
def test_small_model_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
],
)
outputs = unmasker("My name is <mask> <mask>", top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
],
)
@require_torch_accelerator
def test_fp16_casting(self):
pipe = pipeline(
"fill-mask",
model="hf-internal-testing/tiny-random-distilbert",
device=torch_device,
framework="pt",
)
# convert model to fp16
pipe.model.half()
response = pipe("Paris is the [MASK] of France.")
        # We don't actually care about the result; we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(response, list)
@slow
@require_torch
def test_large_model_pt(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
self.run_large_test(unmasker)
@slow
@require_tf
def test_large_model_tf(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
self.run_large_test(unmasker)
def run_large_test(self, unmasker):
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
],
)
dummy_str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100
outputs = unmasker(
"My name is <mask>" + dummy_str,
tokenizer_kwargs={"truncation": True},
)
simplified = nested_simplify(outputs, decimals=4)
self.assertEqual(
[{"sequence": x["sequence"][:100]} for x in simplified],
[
{"sequence": f"My name is,{dummy_str}"[:100]},
{"sequence": f"My name is:,{dummy_str}"[:100]},
],
)
self.assertEqual(
[{k: x[k] for k in x if k != "sequence"} for x in simplified],
[
{"score": 0.2819, "token": 6, "token_str": ","},
{"score": 0.0954, "token": 46686, "token_str": ":,"},
],
)
@require_torch
def test_model_no_pad_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
@require_tf
def test_model_no_pad_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
def get_test_pipeline(self, model, tokenizer, processor):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
examples = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def run_pipeline_test(self, fill_masker, examples):
tokenizer = fill_masker.tokenizer
model = fill_masker.model
outputs = fill_masker(
f"This is a {tokenizer.mask_token}",
)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
with self.assertRaises(ValueError):
fill_masker([None])
        # Inputs without a mask_token are not supported
with self.assertRaises(PipelineException):
fill_masker("This is")
self.run_test_top_k(model, tokenizer)
self.run_test_targets(model, tokenizer)
self.run_test_top_k_targets(model, tokenizer)
self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
self.fill_mask_with_multiple_masks(model, tokenizer)
def run_test_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
targets = sorted(vocab.keys())[:2]
# Pipeline argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs}, target_ids)
processed_targets = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
# Call argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs}, target_ids)
processed_targets = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
# Score equivalence
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
tokens = [top_mask["token_str"] for top_mask in outputs]
scores = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(tokens) == set(targets):
unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
target_scores = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
# Raises with invalid
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
def run_test_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
self.assertEqual(
outputs2,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
# top_k=2, ntargets=3
targets = sorted(vocab.keys())[:3]
outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets and filter differently, we should still
        # get the same results
targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(targets2).issubset(targets):
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
# They should yield exactly the same result
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = sorted(vocab.keys())[:3]
targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we cannot output more
        # results than there are unique targets
self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
)
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
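# Illustrative sketch of the target-restricted top-k behaviour exercised by
# `run_test_targets` / `run_test_top_k_targets` above: candidate targets are mapped to
# vocabulary ids (duplicates collapse, as in the duplicate-targets test), and the top-k
# selection happens over those ids only. Hypothetical numpy-only helper, not the
# pipeline implementation.
def _example_top_k_over_targets(mask_logits, vocab, targets, top_k=2):
    import numpy as np
    logits = np.asarray(mask_logits, dtype=np.float64)
    # Stable softmax over the full vocabulary logits for the masked position.
    probs = np.exp(logits - logits.max())
    probs = probs / probs.sum()
    # Restrict to the requested targets that exist in the vocabulary.
    target_ids = sorted({vocab[t] for t in targets if t in vocab})
    ranked = sorted(target_ids, key=lambda i: probs[i], reverse=True)[:top_k]
    id_to_token = {i: t for t, i in vocab.items()}
    return [{"token": i, "token_str": id_to_token[i], "score": float(probs[i])} for i in ranked]
# e.g. _example_top_k_over_targets([0.1, 2.0, 0.3], {"a": 0, "b": 1, "c": 2}, ["a", "b", "a"])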
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_audio_classification.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return audio_classifier, [audio2, audio]
def run_pipeline_test(self, audio_classifier, examples):
audio2, audio = examples
output = audio_classifier(audio)
# by default a model is initialized with num_labels=2
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
output = audio_classifier(audio, top_k=1)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
],
)
self.run_torchaudio(audio_classifier)
@require_torchaudio
def run_torchaudio(self, audio_classifier):
import datasets
# test with a local file
dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = dataset[0]["audio"]["array"]
output = audio_classifier(audio)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
@require_torch
def test_small_model_pt(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
EXPECTED_OUTPUT = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
EXPECTED_OUTPUT_PT_2 = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
output = audio_classifier(audio_dict, top_k=4)
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def test_large_model_pt(self):
import datasets
model = "superb/wav2vec2-base-superb-ks"
audio_classifier = pipeline("audio-classification", model=model)
dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
audio = np.array(dataset[3]["speech"], dtype=np.float32)
output = audio_classifier(audio, top_k=4)
self.assertEqual(
nested_simplify(output, decimals=3),
[
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
],
)
@require_tf
@unittest.skip("Audio classification is not implemented for TF")
def test_small_model_tf(self):
pass
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/pipelines/test_pipelines_video_classification.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
example_video_filepath = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
)
video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
examples = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def run_pipeline_test(self, video_classifier, examples):
for example in examples:
outputs = video_classifier(example)
self.assertEqual(
outputs,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
@require_torch
def test_small_model_pt(self):
small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
small_feature_extractor = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
)
video_classifier = pipeline(
"video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
)
video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
outputs = video_classifier(video_file_path, top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
)
outputs = video_classifier(
[
video_file_path,
video_file_path,
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
],
)
@require_tf
def test_small_model_tf(self):
pass
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_activations.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
def test_gelu_versions(self):
x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
torch_builtin = get_activation("gelu")
self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
def test_gelu_10(self):
x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
torch_builtin = get_activation("gelu")
gelu10 = get_activation("gelu_10")
y_gelu = torch_builtin(x)
y_gelu_10 = gelu10(x)
clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
def test_get_activation(self):
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
with self.assertRaises(KeyError):
get_activation("bogus")
with self.assertRaises(KeyError):
get_activation(None)
def test_activations_are_distinct_objects(self):
act1 = get_activation("gelu")
act1.a = 1
act2 = get_activation("gelu")
self.assertEqual(act1.a, 1)
with self.assertRaises(AttributeError):
_ = act2.a
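# For reference: the exact ("python") GELU checked above is x * 0.5 * (1 + erf(x / sqrt(2))),
# and `gelu_10` behaves like a GELU whose output is clamped at 10 (test_gelu_10 only
# verifies the upper bound; the library may also clamp below). Minimal math-only sketch
# with a hypothetical name, not the library implementation.
def _example_gelu(x, clamp_at=None):
    import math
    value = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
    if clamp_at is not None:
        value = min(value, clamp_at)
    return value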
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_dynamic_module_utils.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
tmp_file_path = os.path.join(tmp_path, "test_file.py")
with open(tmp_file_path, "w") as _tmp_file:
_tmp_file.write(case)
parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
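# The CASES above all reduce to ["os"]: imports wrapped in try/except are treated as
# optional dependencies and skipped, while imports nested inside functions still count.
# Below is one way such filtering could be written with `ast`; it mirrors the expected
# behaviour of the cases above but is not the actual implementation of `get_imports`.
def _example_collect_required_imports(source):
    import ast
    found = set()
    def visit(node, inside_try=False):
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.Try):
                # Anything imported under a try/except is considered optional.
                visit(child, inside_try=True)
            elif isinstance(child, ast.Import) and not inside_try:
                found.update(alias.name.split(".")[0] for alias in child.names)
            elif isinstance(child, ast.ImportFrom) and not inside_try:
                if child.module is not None:
                    found.add(child.module.split(".")[0])
            else:
                visit(child, inside_try)
    visit(ast.parse(source))
    return sorted(found)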
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_doc_samples.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import logging
import os
import unittest
from glob import glob
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@require_torch
class TestDocLists(unittest.TestCase):
def test_flash_support_list(self):
with open("./docs/source/en/perf_infer_gpu_one.md", "r") as f:
doctext = f.read()
doctext = doctext.split("FlashAttention-2 is currently supported for the following architectures:")[1]
doctext = doctext.split("You can request to add FlashAttention-2 support")[0]
patterns = glob("./src/transformers/models/**/modeling_*.py")
patterns_tf = glob("./src/transformers/models/**/modeling_tf_*.py")
patterns_flax = glob("./src/transformers/models/**/modeling_flax_*.py")
patterns = list(set(patterns) - set(patterns_tf) - set(patterns_flax))
archs_supporting_fa2 = []
for filename in patterns:
with open(filename, "r") as f:
text = f.read()
if "_supports_flash_attn_2 = True" in text:
model_name = os.path.basename(filename).replace(".py", "").replace("modeling_", "")
archs_supporting_fa2.append(model_name)
for arch in archs_supporting_fa2:
if arch not in doctext:
                raise ValueError(
                    f"{arch} should be listed in the flash attention documentation but is not. Please update the documentation."
                )
def test_sdpa_support_list(self):
with open("./docs/source/en/perf_infer_gpu_one.md", "r") as f:
doctext = f.read()
doctext = doctext.split(
"For now, Transformers supports inference and training through SDPA for the following architectures:"
)[1]
doctext = doctext.split("Note that FlashAttention can only be used for models using the")[0]
patterns = glob("./src/transformers/models/**/modeling_*.py")
patterns_tf = glob("./src/transformers/models/**/modeling_tf_*.py")
patterns_flax = glob("./src/transformers/models/**/modeling_flax_*.py")
patterns = list(set(patterns) - set(patterns_tf) - set(patterns_flax))
archs_supporting_sdpa = []
for filename in patterns:
with open(filename, "r") as f:
text = f.read()
if "_supports_sdpa = True" in text:
model_name = os.path.basename(filename).replace(".py", "").replace("modeling_", "")
archs_supporting_sdpa.append(model_name)
for arch in archs_supporting_sdpa:
if arch not in doctext:
raise ValueError(
f"{arch} should be in listed in the SDPA documentation but is not. Please update the documentation."
)
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
def analyze_directory(
self,
directory: Path,
identifier: Union[str, None] = None,
ignore_files: Union[List[str], None] = None,
n_identifier: Union[str, List[str], None] = None,
only_modules: bool = True,
):
"""
Runs through the given directory, looking for files identified by `identifier`, and executes
the doctests in those files.
Args:
directory (`Path`): Directory containing the files
identifier (`str`): Only parse files whose names contain this string
ignore_files (`List[str]`): List of files to skip
n_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers.
only_modules (`bool`): Whether to only analyze modules
"""
files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
if identifier is not None:
files = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(n_identifier, List):
for n_ in n_identifier:
files = [file for file in files if n_ not in file]
else:
files = [file for file in files if n_identifier not in file]
ignore_files = ignore_files or []
ignore_files.append("__init__.py")
files = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing", file)
if only_modules:
module_identifier = file.split(".")[0]
try:
module_identifier = getattr(transformers, module_identifier)
suite = doctest.DocTestSuite(module_identifier)
result = unittest.TextTestRunner().run(suite)
self.assertIs(len(result.failures), 0)
except AttributeError:
logger.info(f"{module_identifier} is not a module.")
else:
result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed, 0)
def test_modeling_examples(self):
transformers_directory = Path("src/transformers")
files = "modeling"
ignore_files = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)
def test_tokenization_examples(self):
transformers_directory = Path("src/transformers")
files = "tokenization"
self.analyze_directory(transformers_directory, identifier=files)
def test_configuration_examples(self):
transformers_directory = Path("src/transformers")
files = "configuration"
self.analyze_directory(transformers_directory, identifier=files)
def test_remaining_examples(self):
transformers_directory = Path("src/transformers")
n_identifiers = ["configuration", "modeling", "tokenization"]
self.analyze_directory(transformers_directory, n_identifier=n_identifiers)
def test_doc_sources(self):
doc_source_directory = Path("docs/source")
ignore_files = ["favicon.ico"]
self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_hub_utils.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
GATED_REPO = "hf-internal-testing/dummy-gated-model"
README_FILE = "README.md"
class GetFromCacheTests(unittest.TestCase):
def test_cached_file(self):
archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(CACHE_DIR))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
main_commit = f.read()
self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
self.assertTrue(os.path.isfile(archive_file))
# File is cached at the same place the second time.
new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
self.assertEqual(archive_file, new_archive_file)
# Using a specific revision to test the full commit hash.
archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
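# For orientation (editorial sketch, not asserted beyond the checks above), the hub cache layout
# being exercised looks roughly like:
#   models--hf-internal-testing--tiny-random-bert/
#       blobs/<one entry per downloaded file>
#       refs/main                               <- plain-text file holding the commit hash
#       snapshots/<commit hash>/config.json     <- the path `cached_file` returns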
def test_cached_file_errors(self):
with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
_ = cached_file("tiny-random-bert", CONFIG_NAME)
with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
_ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
_ = cached_file(RANDOM_BERT, "conf")
def test_non_existence_is_cached(self):
with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
_ = cached_file(RANDOM_BERT, "conf")
with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
main_commit = f.read()
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))
path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
self.assertIsNone(path)
path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
self.assertIsNone(path)
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
self.assertIsNone(path)
# This check ensures we did call the fake head request
mock_head.assert_called()
def test_has_file(self):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
def test_get_file_from_repo_distant(self):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
get_file_from_repo("bert-base-case", CONFIG_NAME)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")
resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
# The name is the cached name which is not very easy to test, so instead we load the content.
with open(resolved_file, "r") as f:
    config = json.load(f)
self.assertEqual(config["hidden_size"], 768)
def test_get_file_from_repo_local(self):
with tempfile.TemporaryDirectory() as tmp_dir:
filename = Path(tmp_dir) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
def test_get_file_gated_repo(self):
"""Test download file from a gated repo fails with correct message when not authenticated."""
with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."):
# All files except README.md are protected on a gated repo.
cached_file(GATED_REPO, "gated_file.txt", token=False)
def test_has_file_gated_repo(self):
"""Test check file existence from a gated repo fails with correct message when not authenticated."""
with self.assertRaisesRegex(EnvironmentError, "is a gated repository"):
# All files except README.md are protected on a gated repo.
has_file(GATED_REPO, "gated_file.txt", token=False)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_logging.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTests(unittest.TestCase):
def test_set_level(self):
logger = logging.get_logger()
# the current default level is logging.WARNING
level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
# restore to the original level
logging.set_verbosity(level_origin)
def test_integration(self):
level_origin = logging.get_verbosity()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, "")
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# restore to the original level
logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error")
def test_env_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_ = logging.get_logger("transformers.models.bart.tokenization_bart")
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
env_level = logging.log_levels[env_level_str]
current_level = logging.get_verbosity()
self.assertEqual(
env_level,
current_level,
f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
)
# restore to the original level
os.environ["TRANSFORMERS_VERBOSITY"] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error")
def test_env_invalid_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
logger = logging.logging.getLogger()
with CaptureLogger(logger) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart")
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
def test_advisory_warnings(self):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
# nothing should be logged as env var disables this method
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, "")
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is reset to an empty value here
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
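# Editorial sketch (not part of the original tests): the user-facing pattern these helpers support.
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("visible, because the `transformers` root logger is now at INFO")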
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_model_card.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
from transformers.modelcard import ModelCard
class ModelCardTester(unittest.TestCase):
def setUp(self):
self.inputs_dict = {
"model_details": {
"Organization": "testing",
"Model date": "today",
"Model version": "v2.1, Developed by Test Corp in 2019.",
"Architecture": "Convolutional Neural Network.",
},
"metrics": "BLEU and ROUGE-1",
"evaluation_data": {
"Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"},
"Preprocessing": "See details on https://arxiv.org/pdf/1810.03993.pdf",
},
"training_data": {
"Dataset": "English Wikipedia dump dated 2018-12-01",
"Preprocessing": (
"Using SentencePiece vocabulary of size 52k tokens. See details on"
" https://arxiv.org/pdf/1810.03993.pdf"
),
},
"quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76},
}
def test_model_card_common_properties(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
self.assertTrue(hasattr(modelcard, "model_details"))
self.assertTrue(hasattr(modelcard, "intended_use"))
self.assertTrue(hasattr(modelcard, "factors"))
self.assertTrue(hasattr(modelcard, "metrics"))
self.assertTrue(hasattr(modelcard, "evaluation_data"))
self.assertTrue(hasattr(modelcard, "training_data"))
self.assertTrue(hasattr(modelcard, "quantitative_analyses"))
self.assertTrue(hasattr(modelcard, "ethical_considerations"))
self.assertTrue(hasattr(modelcard, "caveats_and_recommendations"))
def test_model_card_to_json_string(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
obj = json.loads(modelcard.to_json_string())
for key, value in self.inputs_dict.items():
self.assertEqual(obj[key], value)
def test_model_card_to_json_file(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, "modelcard.json")
model_card_first.to_json_file(filename)
model_card_second = ModelCard.from_json_file(filename)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
def test_model_card_from_and_save_pretrained(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model_card_first.save_pretrained(tmpdirname)
model_card_second = ModelCard.from_pretrained(tmpdirname)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_model_output.py | # coding=utf-8
# Copyright 2020 The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dataclasses import dataclass
from typing import Optional
from transformers.testing_utils import require_torch
from transformers.utils import ModelOutput
@dataclass
class ModelOutputTest(ModelOutput):
a: float
b: Optional[float] = None
c: Optional[float] = None
class ModelOutputTester(unittest.TestCase):
def test_get_attributes(self):
x = ModelOutputTest(a=30)
self.assertEqual(x.a, 30)
self.assertIsNone(x.b)
self.assertIsNone(x.c)
with self.assertRaises(AttributeError):
_ = x.d
def test_index_with_ints_and_slices(self):
x = ModelOutputTest(a=30, b=10)
self.assertEqual(x[0], 30)
self.assertEqual(x[1], 10)
self.assertEqual(x[:2], (30, 10))
self.assertEqual(x[:], (30, 10))
x = ModelOutputTest(a=30, c=10)
self.assertEqual(x[0], 30)
self.assertEqual(x[1], 10)
self.assertEqual(x[:2], (30, 10))
self.assertEqual(x[:], (30, 10))
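# Editorial note: `x[1]` is 10 in both cases above because ModelOutput drops None fields from its
# underlying tuple, so with `b` unset, `c` slides into the second position. Roughly:
#   ModelOutputTest(a=30, c=10).to_tuple() == (30, 10)   # `b` is skipped entirely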
def test_index_with_strings(self):
x = ModelOutputTest(a=30, b=10)
self.assertEqual(x["a"], 30)
self.assertEqual(x["b"], 10)
with self.assertRaises(KeyError):
_ = x["c"]
x = ModelOutputTest(a=30, c=10)
self.assertEqual(x["a"], 30)
self.assertEqual(x["c"], 10)
with self.assertRaises(KeyError):
_ = x["b"]
def test_dict_like_properties(self):
x = ModelOutputTest(a=30)
self.assertEqual(list(x.keys()), ["a"])
self.assertEqual(list(x.values()), [30])
self.assertEqual(list(x.items()), [("a", 30)])
self.assertEqual(list(x), ["a"])
x = ModelOutputTest(a=30, b=10)
self.assertEqual(list(x.keys()), ["a", "b"])
self.assertEqual(list(x.values()), [30, 10])
self.assertEqual(list(x.items()), [("a", 30), ("b", 10)])
self.assertEqual(list(x), ["a", "b"])
x = ModelOutputTest(a=30, c=10)
self.assertEqual(list(x.keys()), ["a", "c"])
self.assertEqual(list(x.values()), [30, 10])
self.assertEqual(list(x.items()), [("a", 30), ("c", 10)])
self.assertEqual(list(x), ["a", "c"])
with self.assertRaises(Exception):
x = x.update({"d": 20})
with self.assertRaises(Exception):
del x["a"]
with self.assertRaises(Exception):
_ = x.pop("a")
with self.assertRaises(Exception):
_ = x.setdefault("d", 32)
def test_set_attributes(self):
x = ModelOutputTest(a=30)
x.a = 10
self.assertEqual(x.a, 10)
self.assertEqual(x["a"], 10)
def test_set_keys(self):
x = ModelOutputTest(a=30)
x["a"] = 10
self.assertEqual(x.a, 10)
self.assertEqual(x["a"], 10)
def test_instantiate_from_dict(self):
x = ModelOutputTest({"a": 30, "b": 10})
self.assertEqual(list(x.keys()), ["a", "b"])
self.assertEqual(x.a, 30)
self.assertEqual(x.b, 10)
def test_instantiate_from_iterator(self):
x = ModelOutputTest([("a", 30), ("b", 10)])
self.assertEqual(list(x.keys()), ["a", "b"])
self.assertEqual(x.a, 30)
self.assertEqual(x.b, 10)
with self.assertRaises(ValueError):
_ = ModelOutputTest([("a", 30), (10, 10)])
x = ModelOutputTest(a=(30, 30))
self.assertEqual(list(x.keys()), ["a"])
self.assertEqual(x.a, (30, 30))
@require_torch
def test_torch_pytree(self):
# ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves)
# this is important for DistributedDataParallel gradient synchronization with static_graph=True
import torch.utils._pytree as pytree
x = ModelOutput({"a": 1.0, "c": 2.0})
self.assertFalse(pytree._is_leaf(x))
x = ModelOutputTest(a=1.0, c=2.0)
self.assertFalse(pytree._is_leaf(x))
expected_flat_outs = [1.0, 2.0]
expected_tree_spec = pytree.TreeSpec(
ModelOutputTest, (ModelOutputTest, ["a", "c"]), [pytree.LeafSpec(), pytree.LeafSpec()]
)
actual_flat_outs, actual_tree_spec = pytree.tree_flatten(x)
self.assertEqual(expected_flat_outs, actual_flat_outs)
self.assertEqual(expected_tree_spec, actual_tree_spec)
unflattened_x = pytree.tree_unflatten(actual_flat_outs, actual_tree_spec)
self.assertEqual(x, unflattened_x)
class ModelOutputTestNoDataclass(ModelOutput):
"""Invalid test subclass of ModelOutput where @dataclass decorator is not used"""
a: float
b: Optional[float] = None
c: Optional[float] = None
class ModelOutputSubclassTester(unittest.TestCase):
def test_direct_model_output(self):
# Check that direct usage of ModelOutput instantiates without errors
ModelOutput({"a": 1.1})
def test_subclass_no_dataclass(self):
# Check that a subclass of ModelOutput without @dataclass is invalid
# A valid subclass is inherently tested by other unit tests above.
with self.assertRaises(TypeError):
ModelOutputTestNoDataclass(a=1.1, b=2.2, c=3.3)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_versions_utils.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
import sys
from transformers.testing_utils import TestCasePlus
from transformers.utils.versions import require_version, require_version_core
numpy_ver = importlib.metadata.version("numpy")
python_ver = ".".join([str(x) for x in sys.version_info[:3]])
class DependencyVersionCheckTest(TestCasePlus):
def test_core(self):
# lt + different version strings
require_version_core("numpy<1000.4.5")
require_version_core("numpy<1000.4")
require_version_core("numpy<1000")
# le
require_version_core("numpy<=1000.4.5")
require_version_core(f"numpy<={numpy_ver}")
# eq
require_version_core(f"numpy=={numpy_ver}")
# ne
require_version_core("numpy!=1000.4.5")
# ge
require_version_core("numpy>=1.0")
require_version_core("numpy>=1.0.0")
require_version_core(f"numpy>={numpy_ver}")
# gt
require_version_core("numpy>1.0.0")
# mix
require_version_core("numpy>1.0.0,<1000")
# requirement w/o version
require_version_core("numpy")
# unmet requirements due to version conflict
for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn("but found", str(e))
# unmet requirements due to missing module
for req in ["numpipypie>1", "numpipypie2"]:
try:
require_version_core(req)
except importlib.metadata.PackageNotFoundError as e:
self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e))
self.assertIn("Try: pip install transformers -U", str(e))
# bogus requirements formats:
# 1. whole thing
for req in ["numpy??1.0.0", "numpy1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("requirement needs to be in the pip package format", str(e))
# 2. only operators
for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("need one of ", str(e))
def test_python(self):
# matching requirement
require_version("python>=3.6.0")
# not matching requirements
for req in ["python>9.9.9", "python<3.0.0"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn(f"but found python=={python_ver}", str(e))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_backbone_utils.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
def test_get_aligned_output_features_output_indices(self):
stage_names = ["a", "b", "c"]
# Defaults to last layer if both are None
out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
self.assertEqual(out_features, ["c"])
self.assertEqual(out_indices, [2])
# Out indices set to match out features
out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features set to match out indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features selected from negative indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [-3, -1])
def test_verify_out_features_out_indices(self):
# Stage names must be set
with self.assertRaises(ValueError):
verify_out_features_out_indices(["a", "b"], (0, 1), None)
# Out features must be a list
with self.assertRaises(ValueError):
verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
# Out features must be a subset of stage names
with self.assertRaises(ValueError):
verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
# Out indices must be a list or tuple
with self.assertRaises(ValueError):
verify_out_features_out_indices(None, 0, ["a", "b"])
# Out indices must be a subset of stage names
with self.assertRaises(ValueError):
verify_out_features_out_indices(None, (0, 1), ["a"])
# Out features and out indices must be the same length
with self.assertRaises(ValueError):
verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
# Out features should match out indices
with self.assertRaises(ValueError):
verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
# Out features and out indices should be in order
with self.assertRaises(ValueError):
verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
def test_backbone_mixin(self):
backbone = BackboneMixin()
backbone.stage_names = ["a", "b", "c"]
backbone._out_features = ["a", "c"]
backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [0, 2])
# Check out features and indices are updated correctly
backbone.out_features = ["a", "b"]
self.assertEqual(backbone.out_features, ["a", "b"])
self.assertEqual(backbone.out_indices, [0, 1])
backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [-3, -1])
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_offline.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def test_offline_mode(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to do that from inside pytest - so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
# should succeed
env = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env["TRANSFORMERS_OFFLINE"] = "1"
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
@require_torch
def test_offline_mode_no_internet(self):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipeline(task="fill-mask", model=mname)
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
# should succeed
env = self.get_env()
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
@require_torch
def test_offline_mode_sharded_checkpoint(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to do that from inside pytest - so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""
mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, "-c", "\n".join([load, run])]
# should succeed
env = self.get_env()
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
# next emulate no network
cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env["TRANSFORMERS_OFFLINE"] = "1"
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
@require_torch
def test_offline_mode_pipeline_exception(self):
load = """
from transformers import pipeline
"""
run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
env = self.get_env()
env["TRANSFORMERS_OFFLINE"] = "1"
cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 1, result.stderr)
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode",
result.stderr.decode().replace("\n", ""),
)
@require_torch
def test_offline_model_dynamic_model(self):
load = """
from transformers import AutoModel
"""
run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, "-c", "\n".join([load, run])]
# should succeed
env = self.get_env()
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env["TRANSFORMERS_OFFLINE"] = "1"
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn("success", result.stdout.decode())
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_image_utils.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import datasets
import numpy as np
import pytest
from huggingface_hub.file_download import http_get
from requests import ConnectTimeout, ReadTimeout
from tests.pipelines.test_pipelines_document_question_answering import INVOICE_URL
from transformers import is_torch_available, is_vision_available
from transformers.image_utils import ChannelDimension, get_channel_dimension_axis, make_list_of_images
from transformers.testing_utils import is_flaky, require_torch, require_vision
if is_torch_available():
import torch
if is_vision_available():
import PIL.Image
from transformers import ImageFeatureExtractionMixin
from transformers.image_utils import get_image_size, infer_channel_dimension_format, load_image
def get_random_image(height, width):
random_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
return PIL.Image.fromarray(random_array)
@require_vision
class ImageFeatureExtractionTester(unittest.TestCase):
def test_conversion_image_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# Conversion with defaults (rescale + channel first)
array1 = feature_extractor.to_numpy_array(image)
self.assertEqual(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
# Conversion with rescale and not channel first
array2 = feature_extractor.to_numpy_array(image, channel_first=False)
self.assertEqual(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array1, array2.transpose(2, 0, 1)))
# Conversion with no rescale and channel first
array3 = feature_extractor.to_numpy_array(image, rescale=False)
self.assertEqual(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array3.astype(np.float32) * (1 / 255.0)))
# Conversion with no rescale and not channel first
array4 = feature_extractor.to_numpy_array(image, rescale=False, channel_first=False)
self.assertEqual(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array4.astype(np.float32) * (1 / 255.0)))
def test_conversion_array_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)
# By default, rescale (for an array of ints) and channel permute
array1 = feature_extractor.to_numpy_array(array)
self.assertEqual(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))
# Same with no permute
array2 = feature_extractor.to_numpy_array(array, channel_first=False)
self.assertEqual(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))
# Force rescale to False
array3 = feature_extractor.to_numpy_array(array, rescale=False)
self.assertEqual(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))
# Force rescale to False and no channel permute
array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
self.assertEqual(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array4, array))
# Now test the default rescale for a float array (defaults to False)
array5 = feature_extractor.to_numpy_array(array2)
self.assertEqual(array5.dtype, np.float32)
self.assertEqual(array5.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array5, array1))
def test_make_list_of_images_numpy(self):
# Test a single image is converted to a list of 1 image
images = np.random.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = np.random.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is not modified
images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test batched masks with no channel dimension are converted to a list of masks
masks = np.random.randint(0, 2, (4, 16, 32))
masks_list = make_list_of_images(masks, expected_ndims=2)
self.assertEqual(len(masks_list), 4)
self.assertTrue(np.array_equal(masks_list[0], masks[0]))
self.assertIsInstance(masks_list, list)
@require_torch
def test_make_list_of_images_torch(self):
# Test a single image is converted to a list of 1 image
images = torch.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = torch.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is left unchanged
images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
@require_torch
def test_conversion_torch_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# By default, rescale (for a tensor of ints) and channel permute
array1 = feature_extractor.to_numpy_array(array)
self.assertTrue(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))
# Same with no permute
array2 = feature_extractor.to_numpy_array(array, channel_first=False)
self.assertTrue(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))
# Force rescale to False
array3 = feature_extractor.to_numpy_array(array, rescale=False)
self.assertTrue(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))
# Force rescale to False and no channel permute
array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
self.assertTrue(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array4, array))
# Now test the default rescale for a float tensor (defaults to False)
array5 = feature_extractor.to_numpy_array(array2)
self.assertTrue(array5.dtype, np.float32)
self.assertEqual(array5.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array5, array1))
def test_conversion_image_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# On an image, `to_pil_image` is a no-op.
image1 = feature_extractor.to_pil_image(image)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image), np.array(image1)))
def test_conversion_array_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)
# By default, no rescale (for an array of ints)
image1 = feature_extractor.to_pil_image(array)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the array is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(array.transpose(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the array has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(array.astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(array.astype(np.float32), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
@require_torch
def test_conversion_tensor_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# By default, no rescale (for a tensor of ints)
image1 = feature_extractor.to_pil_image(tensor)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the tensor is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the tensor has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(tensor.float() / 255.0)
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(tensor.float(), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
def test_resize_image_and_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = np.array(image)
# Size can be an int or a tuple of ints.
resized_image = feature_extractor.resize(image, 8)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (8, 8))
resized_image1 = feature_extractor.resize(image, (8, 16))
self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
self.assertEqual(resized_image1.size, (8, 16))
# Passing an array converts it to a PIL Image.
resized_image2 = feature_extractor.resize(array, 8)
self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
self.assertEqual(resized_image2.size, (8, 8))
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
resized_image3 = feature_extractor.resize(image, (8, 16))
self.assertTrue(isinstance(resized_image3, PIL.Image.Image))
self.assertEqual(resized_image3.size, (8, 16))
self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_resize_image_and_array_non_default_to_square(self):
feature_extractor = ImageFeatureExtractionMixin()
heights_widths = [
# height, width
# square image
(28, 28),
(27, 27),
# rectangular image: h < w
(28, 34),
(29, 35),
# rectangular image: h > w
(34, 28),
(35, 29),
]
# single integer or single integer in tuple/list
sizes = [22, 27, 28, 36, [22], (27,)]
for (height, width), size in zip(heights_widths, sizes):
for max_size in (None, 37, 1000):
image = get_random_image(height, width)
array = np.array(image)
size = size[0] if isinstance(size, (list, tuple)) else size
# Size can be an int or a tuple of ints.
# If size is an int, the smaller edge of the image will be matched to this number.
# i.e., if height > width, then the image will be rescaled to (size * height / width, size).
if height < width:
exp_w, exp_h = (int(size * width / height), size)
if max_size is not None and max_size < exp_w:
exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
elif width < height:
exp_w, exp_h = (size, int(size * height / width))
if max_size is not None and max_size < exp_h:
exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
else:
exp_w, exp_h = (size, size)
if max_size is not None and max_size < size:
exp_w, exp_h = max_size, max_size
resized_image = feature_extractor.resize(image, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (exp_w, exp_h))
# Passing an array converts it to a PIL Image.
resized_image2 = feature_extractor.resize(array, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
self.assertEqual(resized_image2.size, (exp_w, exp_h))
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
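# Worked example of the expectation above (editorial): the (height, width) = (34, 28) image is
# paired with size=22; the smaller edge (width) is matched to 22 and the height becomes
# int(22 * 34 / 28) = 26, so PIL reports resized_image.size == (22, 26) (width first).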
@require_torch
def test_resize_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# Size can be an int or a tuple of ints.
resized_image = feature_extractor.resize(tensor, 8)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (8, 8))
resized_image1 = feature_extractor.resize(tensor, (8, 16))
self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
self.assertEqual(resized_image1.size, (8, 16))
# Check we get the same results as with NumPy arrays.
resized_image2 = feature_extractor.resize(array, 8)
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
resized_image3 = feature_extractor.resize(array, (8, 16))
self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_normalize_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = np.array(image)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# PIL Image are converted to NumPy arrays for the normalization
normalized_image = feature_extractor.normalize(image, mean, std)
self.assertTrue(isinstance(normalized_image, np.ndarray))
self.assertEqual(normalized_image.shape, (3, 16, 32))
# During the conversion rescale and channel first will be applied.
expected = array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)
np_mean = np.array(mean).astype(np.float32)[:, None, None]
np_std = np.array(std).astype(np.float32)[:, None, None]
expected = (expected - np_mean) / np_std
self.assertTrue(np.array_equal(normalized_image, expected))
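# Worked example (editorial): a pixel of value 255 in channel 0 is rescaled to 1.0 and then
# normalized to (1.0 - 0.1) / 0.2 = 4.5 with the mean/std used above.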
def test_normalize_array(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.random((16, 32, 3))
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or NumPy arrays.
expected = (array - np.array(mean)) / np.array(std)
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
# Normalize will detect automatically if channel first or channel last is used.
array = np.random.random((3, 16, 32))
expected = (array - np.array(mean)[:, None, None]) / np.array(std)[:, None, None]
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
@require_torch
def test_normalize_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.rand(16, 32, 3)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or tensors.
expected = (tensor - torch.tensor(mean)) / torch.tensor(std)
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
# Normalize will detect automatically if channel first or channel last is used.
tensor = torch.rand(3, 16, 32)
expected = (tensor - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
def test_center_crop_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# Test various crop sizes: smaller than the image on both dimensions, bigger on one dimension only, and bigger on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(isinstance(cropped_image, PIL.Image.Image))
# PIL Image.size is transposed compared to NumPy or PyTorch (width first instead of height first).
expected_size = (size, size) if isinstance(size, int) else (size[1], size[0])
self.assertEqual(cropped_image.size, expected_size)
def test_center_crop_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = feature_extractor.to_numpy_array(image)
# Test various crop sizes: smaller than the image on both dimensions, bigger on one dimension only, and bigger on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_array = feature_extractor.center_crop(array, size)
self.assertTrue(isinstance(cropped_array, np.ndarray))
expected_size = (size, size) if isinstance(size, int) else size
self.assertEqual(cropped_array.shape[-2:], expected_size)
# Check result is consistent with PIL.Image.crop
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(np.array_equal(cropped_array, feature_extractor.to_numpy_array(cropped_image)))
@require_torch
def test_center_crop_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = feature_extractor.to_numpy_array(image)
tensor = torch.tensor(array)
# Test various crop sizes: smaller than the image on both dimensions, bigger on one dimension only, and bigger on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_tensor = feature_extractor.center_crop(tensor, size)
self.assertTrue(isinstance(cropped_tensor, torch.Tensor))
expected_size = (size, size) if isinstance(size, int) else size
self.assertEqual(cropped_tensor.shape[-2:], expected_size)
# Check result is consistent with PIL.Image.crop
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(torch.equal(cropped_tensor, torch.tensor(feature_extractor.to_numpy_array(cropped_image))))
@require_vision
class LoadImageTester(unittest.TestCase):
def test_load_img_url(self):
img = load_image(INVOICE_URL)
img_arr = np.array(img)
self.assertEqual(img_arr.shape, (1061, 750, 3))
@is_flaky()
def test_load_img_url_timeout(self):
with self.assertRaises((ReadTimeout, ConnectTimeout)):
load_image(INVOICE_URL, timeout=0.001)
def test_load_img_local(self):
img = load_image("./tests/fixtures/tests_samples/COCO/000000039769.png")
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(480, 640, 3),
)
def test_load_img_base64_prefix(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get(
"https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_0.txt", f
)
with open(tmp_file, encoding="utf-8") as b64:
img = load_image(b64.read())
img_arr = np.array(img)
finally:
os.remove(tmp_file)
self.assertEqual(img_arr.shape, (64, 32, 3))
def test_load_img_base64(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get(
"https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_1.txt", f
)
with open(tmp_file, encoding="utf-8") as b64:
img = load_image(b64.read())
img_arr = np.array(img)
finally:
os.remove(tmp_file)
self.assertEqual(img_arr.shape, (64, 32, 3))
def test_load_img_rgba(self):
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
img = load_image(dataset[0]["file"]) # img with mode RGBA
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(512, 512, 3),
)
def test_load_img_la(self):
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
img = load_image(dataset[1]["file"]) # img with mode LA
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(512, 768, 3),
)
def test_load_img_l(self):
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
img = load_image(dataset[2]["file"]) # img with mode L
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(381, 225, 3),
)
def test_load_img_exif_transpose(self):
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
img_file = dataset[3]["file"]
img_without_exif_transpose = PIL.Image.open(img_file)
img_arr_without_exif_transpose = np.array(img_without_exif_transpose)
self.assertEqual(
img_arr_without_exif_transpose.shape,
(333, 500, 3),
)
img_with_exif_transpose = load_image(img_file)
img_arr_with_exif_transpose = np.array(img_with_exif_transpose)
self.assertEqual(
img_arr_with_exif_transpose.shape,
(500, 333, 3),
)
class UtilFunctionTester(unittest.TestCase):
def test_get_image_size(self):
# Test we can infer the size and channel dimension of an image.
image = np.random.randint(0, 256, (32, 64, 3))
self.assertEqual(get_image_size(image), (32, 64))
image = np.random.randint(0, 256, (3, 32, 64))
self.assertEqual(get_image_size(image), (32, 64))
# Test the channel dimension can be overridden
image = np.random.randint(0, 256, (3, 32, 64))
self.assertEqual(get_image_size(image, channel_dim=ChannelDimension.LAST), (3, 32))
def test_infer_channel_dimension(self):
# Test we fail with invalid input
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 10)))
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 10, 10, 10, 10)))
# Test we fail if neither the first nor the last dimension is of size 3 or 1
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)))
# But if we explicitly say the number of channels is 50, it works
inferred_dim = infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)), num_channels=50)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
# Test we correctly identify the channel dimension
image = np.random.randint(0, 256, (3, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
image = np.random.randint(0, 256, (1, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
image = np.random.randint(0, 256, (4, 5, 3))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
image = np.random.randint(0, 256, (4, 5, 1))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
# We can take a batched array of images and find the dimension
image = np.random.randint(0, 256, (1, 3, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
def test_get_channel_dimension_axis(self):
# Test we correctly identify the channel dimension
image = np.random.randint(0, 256, (3, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 0)
image = np.random.randint(0, 256, (1, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 0)
image = np.random.randint(0, 256, (4, 5, 3))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 2)
image = np.random.randint(0, 256, (4, 5, 1))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 2)
# We can take a batched array of images and find the dimension
image = np.random.randint(0, 256, (1, 3, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 1)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_file_utils.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
# Dummy contexts to test `ContextManagers`
@contextlib.contextmanager
def context_en():
print("Welcome!")
yield
print("Bye!")
@contextlib.contextmanager
def context_fr():
print("Bonjour!")
yield
print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
def test_module_spec_available(self):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([]):
print("Transformers are awesome!")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
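# Note the nesting order in the expected output above: ContextManagers enters the
# managers in list order and exits them in reverse, which is why the French goodbye
# ("Au revoir!") prints last.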
@require_torch
def test_find_labels_pt(self):
self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(BertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), ["labels"])
@require_tf
def test_find_labels_tf(self):
self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(TFBertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
def test_find_labels_flax(self):
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
self.assertEqual(find_labels(FlaxBertForPreTraining), [])
self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(FlaxBertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), [])
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_modeling_tf_core.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import os
import tempfile
from importlib import import_module
from math import isnan
from transformers import is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import _tf_gpu_memory_limit, require_tf, slow
from ..test_modeling_tf_common import ids_tensor
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFSharedEmbeddings,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
@require_tf
class TFCoreModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_mismatched_shapes = True
test_resize_embeddings = True
test_head_masking = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
]:
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
*get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
@slow
def test_graph_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
@slow
def test_xla_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function(experimental_compile=True)
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
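# (`experimental_compile=True` is the older spelling of `jit_compile=True`; both ask
# TensorFlow to compile the traced function with XLA.)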
@slow
def test_xla_fit(self):
# This is a copy of the test_keras_fit method, but we use XLA compilation instead of eager
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
model = model_class(config)
if getattr(model, "hf_compute_loss", None):
# Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Is there a better way to remove these decoder inputs?
prepared_for_class = {
key: val
for key, val in prepared_for_class.items()
if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids")
}
possible_label_cols = {
"labels",
"label",
"label_ids",
"start_positions",
"start_position",
"end_positions",
"end_position",
"next_sentence_label",
}
label_names = possible_label_cols.intersection(set(prepared_for_class))
self.assertGreater(len(label_names), 0, msg="No matching label names found!")
labels = {key: val for key, val in prepared_for_class.items() if key in label_names}
inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names}
self.assertGreater(len(inputs_minus_labels), 0)
# Make sure it works with XLA!
model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True)
# Make sure the model fits without crashing regardless of where we pass the labels
history = model.fit(
prepared_for_class,
validation_data=prepared_for_class,
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
verbose=0,
)
loss = history.history["loss"][0]
self.assertTrue(not isnan(loss))
val_loss = history.history["val_loss"][0]
self.assertTrue(not isnan(val_loss))
# Now test it with separate labels, to make sure that path works in XLA too.
model = model_class(config)
model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True)
history = model.fit(
inputs_minus_labels,
labels,
validation_data=(inputs_minus_labels, labels),
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
verbose=0,
)
loss = history.history["loss"][0]
self.assertTrue(not isnan(loss))
val_loss = history.history["val_loss"][0]
self.assertTrue(not isnan(val_loss))
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes[:2]:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model.build()
num_out = len(model(class_inputs_dict))
for key in list(class_inputs_dict.keys()):
# Remove keys not in the serving signature, as the SavedModel will not be compiled to deal with them
if key not in model.input_signature:
del class_inputs_dict[key]
# Check it's a tensor, in case the inputs dict has some bools in it too
elif isinstance(class_inputs_dict[key], tf.Tensor) and class_inputs_dict[key].dtype.is_integer:
class_inputs_dict[key] = tf.cast(class_inputs_dict[key], tf.int32)
if set(class_inputs_dict.keys()) != set(model.input_signature.keys()):
continue # Some models have inputs that the preparation functions don't create, we skip those
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output_hidden_states = outputs["encoder_hidden_states"]
output_attentions = outputs["encoder_attentions"]
else:
output_hidden_states = outputs["hidden_states"]
output_attentions = outputs["attentions"]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(output_hidden_states), expected_num_layers)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
@slow
def test_mixed_precision(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# try/finally block to ensure subsequent tests run in float32
try:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
outputs = model(class_inputs_dict)
self.assertIsNotNone(outputs)
finally:
tf.keras.mixed_precision.set_global_policy("float32")
@slow
def test_train_pipeline_custom_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# head_mask and decoder_head_mask have different shapes than the other input args
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
if "decoder_head_mask" in inputs_dict:
del inputs_dict["decoder_head_mask"]
if "cross_attn_head_mask" in inputs_dict:
del inputs_dict["cross_attn_head_mask"]
tf_main_layer_classes = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
}
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
if hasattr(self.model_tester, "num_labels"):
num_labels = self.model_tester.num_labels
else:
num_labels = 2
X = tf.data.Dataset.from_tensor_slices(
(inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
).batch(1)
hidden_states = main_layer(symbolic_inputs)[0]
outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
model.fit(X, epochs=1)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
model(inputs_dict)
@slow
def test_graph_mode_with_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
model = model_class(config)
inputs = copy.deepcopy(inputs_dict)
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)
inputs = self._prepare_for_class(inputs, model_class)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_generic.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
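# The generic helpers imported above (transpose, reshape, squeeze, expand_dims) are
# framework-agnostic: the framework-specific tests below check that each helper gives
# the same result on a torch / tf / jax tensor as on the underlying NumPy array.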
class GenericTester(unittest.TestCase):
def test_flatten_dict(self):
input_dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
expected_dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(input_dict), expected_dict)
def test_transpose_numpy(self):
x = np.random.randn(3, 4)
self.assertTrue(np.allclose(transpose(x), x.transpose()))
x = np.random.randn(3, 4, 5)
self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
@require_torch
def test_transpose_torch(self):
x = np.random.randn(3, 4)
t = torch.tensor(x)
self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
x = np.random.randn(3, 4, 5)
t = torch.tensor(x)
self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
@require_tf
def test_transpose_tf(self):
x = np.random.randn(3, 4)
t = tf.constant(x)
self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
x = np.random.randn(3, 4, 5)
t = tf.constant(x)
self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
@require_flax
def test_transpose_flax(self):
x = np.random.randn(3, 4)
t = jnp.array(x)
self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
x = np.random.randn(3, 4, 5)
t = jnp.array(x)
self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
def test_reshape_numpy(self):
x = np.random.randn(3, 4)
self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
x = np.random.randn(3, 4, 5)
self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))
@require_torch
def test_reshape_torch(self):
x = np.random.randn(3, 4)
t = torch.tensor(x)
self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
x = np.random.randn(3, 4, 5)
t = torch.tensor(x)
self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
@require_tf
def test_reshape_tf(self):
x = np.random.randn(3, 4)
t = tf.constant(x)
self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
x = np.random.randn(3, 4, 5)
t = tf.constant(x)
self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
@require_flax
def test_reshape_flax(self):
x = np.random.randn(3, 4)
t = jnp.array(x)
self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
x = np.random.randn(3, 4, 5)
t = jnp.array(x)
self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
def test_squeeze_numpy(self):
x = np.random.randn(1, 3, 4)
self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
x = np.random.randn(1, 4, 1, 5)
self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))
@require_torch
def test_squeeze_torch(self):
x = np.random.randn(1, 3, 4)
t = torch.tensor(x)
self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
x = np.random.randn(1, 4, 1, 5)
t = torch.tensor(x)
self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
@require_tf
def test_squeeze_tf(self):
x = np.random.randn(1, 3, 4)
t = tf.constant(x)
self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
x = np.random.randn(1, 4, 1, 5)
t = tf.constant(x)
self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
@require_flax
def test_squeeze_flax(self):
x = np.random.randn(1, 3, 4)
t = jnp.array(x)
self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
x = np.random.randn(1, 4, 1, 5)
t = jnp.array(x)
self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
def test_expand_dims_numpy(self):
x = np.random.randn(3, 4)
self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
@require_torch
def test_expand_dims_torch(self):
x = np.random.randn(3, 4)
t = torch.tensor(x)
self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
@require_tf
def test_expand_dims_tf(self):
x = np.random.randn(3, 4)
t = tf.constant(x)
self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
@require_flax
def test_expand_dims_flax(self):
x = np.random.randn(3, 4)
t = jnp.array(x)
self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_hf_argparser.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
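# For illustration (not exercised directly here): on Python 3.10+ the annotation
# `foo: int | None = None` is equivalent to `foo: Optional[int] = None`; the
# *Pep604 dataclasses defined below rely on this syntax.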
def list_field(default=None, metadata=None):
return field(default_factory=lambda: default, metadata=metadata)
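# (A default_factory is used because dataclasses reject mutable defaults such as plain
# lists; wrapping the default in a lambda sidesteps that restriction.)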
@dataclass
class BasicExample:
foo: int
bar: float
baz: str
flag: bool
@dataclass
class WithDefaultExample:
foo: int = 42
baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
foo: bool = False
baz: bool = True
opt: Optional[bool] = None
class BasicEnum(Enum):
titi = "titi"
toto = "toto"
class MixedTypeEnum(Enum):
titi = "titi"
toto = "toto"
fourtytwo = 42
@dataclass
class EnumExample:
foo: BasicEnum = "toto"
def __post_init__(self):
self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
foo: MixedTypeEnum = "toto"
def __post_init__(self):
self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
foo: Optional[int] = None
bar: Optional[float] = field(default=None, metadata={"help": "help message"})
baz: Optional[str] = None
ces: Optional[List[str]] = list_field(default=[])
des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
foo_int: List[int] = list_field(default=[])
bar_int: List[int] = list_field(default=[1, 2, 3])
foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
required_list: List[int] = field()
required_str: str = field()
required_enum: BasicEnum = field()
def __post_init__(self):
self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
foo: int
required_enum: "BasicEnum" = field()
opt: "Optional[bool]" = None
baz: "str" = field(default="toto", metadata={"help": "help message"})
foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class WithDefaultBoolExamplePep604:
foo: bool = False
baz: bool = True
opt: bool | None = None
@dataclass
class OptionalExamplePep604:
foo: int | None = None
bar: float | None = field(default=None, metadata={"help": "help message"})
baz: str | None = None
ces: list[str] | None = list_field(default=[])
des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
"""
Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
"""
self.assertEqual(len(a._actions), len(b._actions))
for x, y in zip(a._actions, b._actions):
xx = {k: v for k, v in vars(x).items() if k != "container"}
yy = {k: v for k, v in vars(y).items() if k != "container"}
# Choices with mixed type have a custom function as "type",
# so we need to compare results directly for equality
if xx.get("choices", None) and yy.get("choices", None):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
del xx["type"], yy["type"]
self.assertEqual(xx, yy)
def test_basic(self):
parser = HfArgumentParser(BasicExample)
expected = argparse.ArgumentParser()
expected.add_argument("--foo", type=int, required=True)
expected.add_argument("--bar", type=float, required=True)
expected.add_argument("--baz", type=str, required=True)
expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
self.argparsersEqual(parser, expected)
args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
(example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
self.assertFalse(example.flag)
def test_with_default(self):
parser = HfArgumentParser(WithDefaultExample)
expected = argparse.ArgumentParser()
expected.add_argument("--foo", default=42, type=int)
expected.add_argument("--baz", default="toto", type=str, help="help message")
self.argparsersEqual(parser, expected)
def test_with_default_bool(self):
expected = argparse.ArgumentParser()
expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
expected.add_argument("--opt", type=string_to_bool, default=None)
dataclass_types = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(WithDefaultBoolExamplePep604)
for dataclass_type in dataclass_types:
parser = HfArgumentParser(dataclass_type)
self.argparsersEqual(parser, expected)
args = parser.parse_args([])
self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
args = parser.parse_args(["--foo", "--no_baz"])
self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
args = parser.parse_args(["--foo", "--baz"])
self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
def test_with_enum(self):
parser = HfArgumentParser(MixedTypeEnumExample)
expected = argparse.ArgumentParser()
expected.add_argument(
"--foo",
default="toto",
choices=["titi", "toto", 42],
type=make_choice_type_function(["titi", "toto", 42]),
)
self.argparsersEqual(parser, expected)
args = parser.parse_args([])
self.assertEqual(args.foo, "toto")
enum_ex = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
args = parser.parse_args(["--foo", "titi"])
self.assertEqual(args.foo, "titi")
enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
args = parser.parse_args(["--foo", "42"])
self.assertEqual(args.foo, 42)
enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
def test_with_literal(self):
@dataclass
class LiteralExample:
foo: Literal["titi", "toto", 42] = "toto"
parser = HfArgumentParser(LiteralExample)
expected = argparse.ArgumentParser()
expected.add_argument(
"--foo",
default="toto",
choices=("titi", "toto", 42),
type=make_choice_type_function(["titi", "toto", 42]),
)
self.argparsersEqual(parser, expected)
args = parser.parse_args([])
self.assertEqual(args.foo, "toto")
args = parser.parse_args(["--foo", "titi"])
self.assertEqual(args.foo, "titi")
args = parser.parse_args(["--foo", "42"])
self.assertEqual(args.foo, 42)
def test_with_list(self):
parser = HfArgumentParser(ListExample)
expected = argparse.ArgumentParser()
expected.add_argument("--foo_int", nargs="+", default=[], type=int)
expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
self.argparsersEqual(parser, expected)
args = parser.parse_args([])
self.assertEqual(
args,
Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
)
args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
def test_with_optional(self):
expected = argparse.ArgumentParser()
expected.add_argument("--foo", default=None, type=int)
expected.add_argument("--bar", default=None, type=float, help="help message")
expected.add_argument("--baz", default=None, type=str)
expected.add_argument("--ces", nargs="+", default=[], type=str)
expected.add_argument("--des", nargs="+", default=[], type=int)
dataclass_types = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(OptionalExamplePep604)
for dataclass_type in dataclass_types:
parser = HfArgumentParser(dataclass_type)
self.argparsersEqual(parser, expected)
args = parser.parse_args([])
self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
def test_with_required(self):
parser = HfArgumentParser(RequiredExample)
expected = argparse.ArgumentParser()
expected.add_argument("--required_list", nargs="+", type=int, required=True)
expected.add_argument("--required_str", type=str, required=True)
expected.add_argument(
"--required_enum",
type=make_choice_type_function(["titi", "toto"]),
choices=["titi", "toto"],
required=True,
)
self.argparsersEqual(parser, expected)
def test_with_string_literal_annotation(self):
parser = HfArgumentParser(StringLiteralAnnotationExample)
expected = argparse.ArgumentParser()
expected.add_argument("--foo", type=int, required=True)
expected.add_argument(
"--required_enum",
type=make_choice_type_function(["titi", "toto"]),
choices=["titi", "toto"],
required=True,
)
expected.add_argument("--opt", type=string_to_bool, default=None)
expected.add_argument("--baz", default="toto", type=str, help="help message")
expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
self.argparsersEqual(parser, expected)
def test_parse_dict(self):
parser = HfArgumentParser(BasicExample)
args_dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
parsed_args = parser.parse_dict(args_dict)[0]
args = BasicExample(**args_dict)
self.assertEqual(parsed_args, args)
def test_parse_dict_extra_key(self):
parser = HfArgumentParser(BasicExample)
args_dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
def test_parse_json(self):
parser = HfArgumentParser(BasicExample)
args_dict_for_json = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
temp_local_path = os.path.join(tmp_dir, "temp_json")
os.mkdir(temp_local_path)
with open(temp_local_path + ".json", "w+") as f:
json.dump(args_dict_for_json, f)
parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]
args = BasicExample(**args_dict_for_json)
self.assertEqual(parsed_args, args)
def test_parse_yaml(self):
parser = HfArgumentParser(BasicExample)
args_dict_for_yaml = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
temp_local_path = os.path.join(tmp_dir, "temp_yaml")
os.mkdir(temp_local_path)
with open(temp_local_path + ".yaml", "w+") as f:
yaml.dump(args_dict_for_yaml, f)
parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
args = BasicExample(**args_dict_for_yaml)
self.assertEqual(parsed_args, args)
def test_integration_training_args(self):
parser = HfArgumentParser(TrainingArguments)
self.assertIsNotNone(parser)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/tiny_model_summary.json | {
"ASTForAudioClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ASTFeatureExtractor"
],
"model_classes": [
"ASTForAudioClassification"
],
"sha": "83d6e076db7768a3645401bad3204624985e1d08"
},
"ASTModel": {
"tokenizer_classes": [],
"processor_classes": [
"ASTFeatureExtractor"
],
"model_classes": [
"ASTModel"
],
"sha": "75e68f956f6f2c0709b01e596e7a6aecb1b29dce"
},
"AlbertForMaskedLM": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForMaskedLM",
"TFAlbertForMaskedLM"
],
"sha": "d29de71ac29e1019c3a7762f7357f750730cb037"
},
"AlbertForMultipleChoice": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForMultipleChoice",
"TFAlbertForMultipleChoice"
],
"sha": "242aecce6a589a2964c0f695621fa22a83751579"
},
"AlbertForPreTraining": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForPreTraining",
"TFAlbertForPreTraining"
],
"sha": "41330be4b271687f4d88ddc96346c12aa11de983"
},
"AlbertForQuestionAnswering": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForQuestionAnswering",
"TFAlbertForQuestionAnswering"
],
"sha": "040b81c15f437f4722349dc5b41fccd17ebd7fdc"
},
"AlbertForSequenceClassification": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForSequenceClassification",
"TFAlbertForSequenceClassification"
],
"sha": "39c1a0e2c1c2623106d3211d751e9b32f23a91a0"
},
"AlbertForTokenClassification": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertForTokenClassification",
"TFAlbertForTokenClassification"
],
"sha": "359c3f4a311a4053a6f6d6a880db5f82c8e3ff1f"
},
"AlbertModel": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"AlbertModel",
"TFAlbertModel"
],
"sha": "34a63314686b64aaeb595ddb95006f1ff2ffda17"
},
"AlignModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"EfficientNetImageProcessor"
],
"model_classes": [
"AlignModel"
],
"sha": "68a4f9d3f493f44efa7c1dde6fcca23350e2c92b"
},
"AltCLIPModel": {
"tokenizer_classes": [
"XLMRobertaTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"AltCLIPModel"
],
"sha": "3106af0fd503970717c05f27218e5cacf19ba872"
},
"BarkModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BarkModel"
],
"sha": "187e590fd87359cea47693e8cb11a604cd7b673c"
},
"BartForCausalLM": {
"tokenizer_classes": [
"BartTokenizer",
"BartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BartForCausalLM"
],
"sha": "c25526ac67d2dbe79fe5462af4b7908ca2fbc3ff"
},
"BartForConditionalGeneration": {
"tokenizer_classes": [
"BartTokenizer",
"BartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BartForConditionalGeneration",
"TFBartForConditionalGeneration"
],
"sha": "3a489a21e4b04705f4a6047924b7616a67be7e37"
},
"BartForQuestionAnswering": {
"tokenizer_classes": [
"BartTokenizer",
"BartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BartForQuestionAnswering"
],
"sha": "3ebf9aab39a57ceab55128d5fc6f61e4db0dadd4"
},
"BartForSequenceClassification": {
"tokenizer_classes": [
"BartTokenizer",
"BartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BartForSequenceClassification",
"TFBartForSequenceClassification"
],
"sha": "ea452fd9a928cfebd71723afa50feb20326917bc"
},
"BartModel": {
"tokenizer_classes": [
"BartTokenizer",
"BartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BartModel",
"TFBartModel"
],
"sha": "e5df6d1aa75f03833b2df328b9c35463f73a421b"
},
"BeitForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"BeitForImageClassification"
],
"sha": "e997587bb890f82faad4bd25eb23d85ba21ecaaa"
},
"BeitForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"BeitForSemanticSegmentation"
],
"sha": "d4afa9e21e3fe5b087578ed68974d9b3ffc1fb22"
},
"BeitModel": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"BeitModel"
],
"sha": "5c4a051f0cca6f64d02c6168deb88413cae10d2c"
},
"BertForMaskedLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForMaskedLM",
"TFBertForMaskedLM"
],
"sha": "3e32baa52ce044c75edfb5c28abd51ee8d051282"
},
"BertForMultipleChoice": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForMultipleChoice",
"TFBertForMultipleChoice"
],
"sha": "0b8c3a6d411d1e19e5fd98d4d8631ae7616eeeaa"
},
"BertForNextSentencePrediction": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForNextSentencePrediction",
"TFBertForNextSentencePrediction"
],
"sha": "628e70debf8864bd0b63aff7901d17d9c4f7612c"
},
"BertForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForPreTraining",
"TFBertForPreTraining"
],
"sha": "c748ad37e6a200a6f64b2764191bfe13f976032f"
},
"BertForQuestionAnswering": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForQuestionAnswering",
"TFBertForQuestionAnswering"
],
"sha": "4671ad0c21493b97c5eb2f0201192704c29876d5"
},
"BertForSequenceClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForSequenceClassification",
"TFBertForSequenceClassification"
],
"sha": "37a9d44022264c12bdf3ec257778f953b63d4aaf"
},
"BertForTokenClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertForTokenClassification",
"TFBertForTokenClassification"
],
"sha": "d7dc3a0793ff6dfcb794b21130ee0f185d2c61a2"
},
"BertLMHeadModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertLMHeadModel",
"TFBertLMHeadModel"
],
"sha": "b4e3acc1990f3e365ffddbd54b620a26d9fb4b09"
},
"BertModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BertModel",
"TFBertModel"
],
"sha": "3956d303d3cddf0708ff20660c1ea5f6ec30e434"
},
"BigBirdForCausalLM": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForCausalLM"
],
"sha": "5c7a487af5248d9c01b45d5481b7d7bb9b36e1b5"
},
"BigBirdForMaskedLM": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForMaskedLM"
],
"sha": "476ef8225c0f69270b577706ad4f1dda13e4dde5"
},
"BigBirdForMultipleChoice": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForMultipleChoice"
],
"sha": "cf93eaa1019987112c171a407745bc183a20513a"
},
"BigBirdForPreTraining": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForPreTraining"
],
"sha": "5fb9efa13334431e7c186a9fa314b89c4a1eee72"
},
"BigBirdForQuestionAnswering": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForQuestionAnswering"
],
"sha": "f82f88bd71fba819a8ffb0692915d3529e705417"
},
"BigBirdForSequenceClassification": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForSequenceClassification"
],
"sha": "ea398090858f9af93b54fc9a8d65cfed78ac27ff"
},
"BigBirdForTokenClassification": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdForTokenClassification"
],
"sha": "2cdea118999fa58ba9fb0162d99e2ffa146c3df1"
},
"BigBirdModel": {
"tokenizer_classes": [
"BigBirdTokenizer",
"BigBirdTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdModel"
],
"sha": "9c55989f31df156194e6997606fb14d9897e0300"
},
"BigBirdPegasusForCausalLM": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdPegasusForCausalLM"
],
"sha": "49bc8816c666dee32e27cd8e00136b604eb85243"
},
"BigBirdPegasusForConditionalGeneration": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdPegasusForConditionalGeneration"
],
"sha": "e791aa6d1af5a76ca0926d95b1f28bd2d8adf376"
},
"BigBirdPegasusForQuestionAnswering": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdPegasusForQuestionAnswering"
],
"sha": "7650e076713ca707a37062adc8c9c1cd60dad7c7"
},
"BigBirdPegasusForSequenceClassification": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdPegasusForSequenceClassification"
],
"sha": "02500e8ebd9c53528750013fb963fbdc2be34034"
},
"BigBirdPegasusModel": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BigBirdPegasusModel"
],
"sha": "b07c5304dfba673cf8b9cf5cd1aa45fbfea1c2f3"
},
"BioGptForCausalLM": {
"tokenizer_classes": [
"BioGptTokenizer"
],
"processor_classes": [],
"model_classes": [
"BioGptForCausalLM"
],
"sha": "07073b31da84054fd12226e3cae4cb3beb2547f9"
},
"BioGptForSequenceClassification": {
"tokenizer_classes": [
"BioGptTokenizer"
],
"processor_classes": [],
"model_classes": [
"BioGptForSequenceClassification"
],
"sha": "8e18ad6218abd795e050dec324a8c827ccedacb4"
},
"BioGptForTokenClassification": {
"tokenizer_classes": [
"BioGptTokenizer"
],
"processor_classes": [],
"model_classes": [
"BioGptForTokenClassification"
],
"sha": "67f8173c1a17273064d452a9031a51b67f327b6a"
},
"BioGptModel": {
"tokenizer_classes": [
"BioGptTokenizer"
],
"processor_classes": [],
"model_classes": [
"BioGptModel"
],
"sha": "fe18551d0743538a990520b75707294ec57b4ebe"
},
"BitBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"BitBackbone"
],
"sha": "2f06f6b4395b6dce2b00ac839ff757410e743cd7"
},
"BitForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"BitForImageClassification"
],
"sha": "d0d8476f2d285ddda7c42c0d4a8e4bf6f5d2bfdf"
},
"BitModel": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"BitModel"
],
"sha": "30a8a9b1a6b253cc500c01cf41bc1fc9581ea5e5"
},
"BlenderbotForCausalLM": {
"tokenizer_classes": [
"BlenderbotTokenizer",
"BlenderbotTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BlenderbotForCausalLM"
],
"sha": "8aad2e13e8920bca3cf988ba45f8a7b008b51a81"
},
"BlenderbotForConditionalGeneration": {
"tokenizer_classes": [
"BlenderbotTokenizer",
"BlenderbotTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BlenderbotForConditionalGeneration",
"TFBlenderbotForConditionalGeneration"
],
"sha": "e8532878b9924fa02fb4b059b7f6e7fa372fff91"
},
"BlenderbotModel": {
"tokenizer_classes": [
"BlenderbotTokenizer",
"BlenderbotTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BlenderbotModel",
"TFBlenderbotModel"
],
"sha": "ff848a40c30ca98eb7c6870bbb02677d5af9db55"
},
"BlenderbotSmallForCausalLM": {
"tokenizer_classes": [
"BlenderbotSmallTokenizer"
],
"processor_classes": [],
"model_classes": [
"BlenderbotSmallForCausalLM"
],
"sha": "4c57c106630932eb9de4d76210a540d04616304d"
},
"BlenderbotSmallForConditionalGeneration": {
"tokenizer_classes": [
"BlenderbotSmallTokenizer"
],
"processor_classes": [],
"model_classes": [
"BlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallForConditionalGeneration"
],
"sha": "b8db01fcf3e37a5b369cd50e169bf383b8e905d8"
},
"BlenderbotSmallModel": {
"tokenizer_classes": [
"BlenderbotSmallTokenizer"
],
"processor_classes": [],
"model_classes": [
"BlenderbotSmallModel",
"TFBlenderbotSmallModel"
],
"sha": "0a10c70e225ec63278faffa8fabf759f063f0e55"
},
"Blip2ForConditionalGeneration": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [
"BlipImageProcessor"
],
"model_classes": [
"Blip2ForConditionalGeneration"
],
"sha": "35e1ef43da3554af62eb29a7b3dbbef3f3bef48e"
},
"Blip2Model": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [
"BlipImageProcessor"
],
"model_classes": [
"Blip2Model"
],
"sha": "c23378f225be31872fff33c103cf0ebc2454ffcc"
},
"BlipForConditionalGeneration": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"BlipImageProcessor"
],
"model_classes": [
"BlipForConditionalGeneration",
"TFBlipForConditionalGeneration"
],
"sha": "eaf32bc0369349deef0c777442fc185119171d1f"
},
"BlipModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"BlipImageProcessor"
],
"model_classes": [
"BlipModel",
"TFBlipModel"
],
"sha": "3d1d1c15eff22d6b2664a2d15757fa6f5d93827d"
},
"BloomForCausalLM": {
"tokenizer_classes": [
"BloomTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BloomForCausalLM"
],
"sha": "0f4f06f162cd67d34d03ee156484e4001d468500"
},
"BloomForQuestionAnswering": {
"tokenizer_classes": [
"BloomTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BloomForQuestionAnswering"
],
"sha": "23f369f163eef8c9c9685900440b0cbb0f3439fd"
},
"BloomForSequenceClassification": {
"tokenizer_classes": [
"BloomTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BloomForSequenceClassification"
],
"sha": "b2280eef7172835f39b265eb0c46623257f67bbe"
},
"BloomForTokenClassification": {
"tokenizer_classes": [
"BloomTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BloomForTokenClassification"
],
"sha": "9796aa45f99adff987c978089e11c0bd9d7b997f"
},
"BloomModel": {
"tokenizer_classes": [
"BloomTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BloomModel"
],
"sha": "28b600fcfdc4f4938406fb518abf895620048cb2"
},
"BrosForTokenClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BrosForTokenClassification"
],
"sha": "4ec2c91936f96b93667e8946fc7abbdeeb08a6d7"
},
"BrosModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"BrosModel"
],
"sha": "e2464830b1874eeaf9f4b425fbe0ce8e7c7643e9"
},
"CLIPModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"CLIPModel",
"TFCLIPModel"
],
"sha": "0452d344074485d0e7eb5d5c12447b7c9dbc9619"
},
"CLIPSegModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"CLIPSegModel"
],
"sha": "7b1305214ccc85d29b776ffbee06748693852a04"
},
"CTRLForSequenceClassification": {
"tokenizer_classes": [
"CTRLTokenizer"
],
"processor_classes": [],
"model_classes": [
"CTRLForSequenceClassification",
"TFCTRLForSequenceClassification"
],
"sha": "280b5a3502d607c55c9f8d9f198fe9c2802d6f73"
},
"CTRLLMHeadModel": {
"tokenizer_classes": [
"CTRLTokenizer"
],
"processor_classes": [],
"model_classes": [
"CTRLLMHeadModel",
"TFCTRLLMHeadModel"
],
"sha": "662381663b216f1dd3c9cd30e2e83cb4c6fc9552"
},
"CTRLModel": {
"tokenizer_classes": [
"CTRLTokenizer"
],
"processor_classes": [],
"model_classes": [
"CTRLModel",
"TFCTRLModel"
],
"sha": "68b19b4f132d5a191a73acd78d983cbdcf068e9c"
},
"CanineForMultipleChoice": {
"tokenizer_classes": [
"CanineTokenizer"
],
"processor_classes": [],
"model_classes": [
"CanineForMultipleChoice"
],
"sha": "fa0451453ed202f903ff7dcf6071aab6630fb89f"
},
"CanineForQuestionAnswering": {
"tokenizer_classes": [
"CanineTokenizer"
],
"processor_classes": [],
"model_classes": [
"CanineForQuestionAnswering"
],
"sha": "5e1012bb086ac2e0b1497eeb7ed14eb2183d4ecb"
},
"CanineForSequenceClassification": {
"tokenizer_classes": [
"CanineTokenizer"
],
"processor_classes": [],
"model_classes": [
"CanineForSequenceClassification"
],
"sha": "75336dc9179153869c38a8047ce4b1e02677a260"
},
"CanineForTokenClassification": {
"tokenizer_classes": [
"CanineTokenizer"
],
"processor_classes": [],
"model_classes": [
"CanineForTokenClassification"
],
"sha": "65a622ea8e12597e12f45e59d46d8dbe8461fc10"
},
"CanineModel": {
"tokenizer_classes": [
"CanineTokenizer"
],
"processor_classes": [],
"model_classes": [
"CanineModel"
],
"sha": "531ef67ad4f0b3dc7a9e5d722c774096b7401b1b"
},
"ChineseCLIPModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"ChineseCLIPImageProcessor"
],
"model_classes": [
"ChineseCLIPModel"
],
"sha": "504271a3c5fd9c2e877f5b4c01848bc18778c7c3"
},
"ClapModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [
"ClapFeatureExtractor"
],
"model_classes": [
"ClapModel"
],
"sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9"
},
"ClvpModelForConditionalGeneration": {
"tokenizer_classes": [
"ClvpTokenizer"
],
"processor_classes": [
"ClvpFeatureExtractor"
],
"model_classes": [],
"sha": "45df7581535be337ff781707b6c20994ca221f05"
},
"CodeGenForCausalLM": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"CodeGenForCausalLM"
],
"sha": "a3fc69d757fd1f0aa01bcbc4337f586651c7cb10"
},
"CodeGenModel": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"CodeGenModel"
],
"sha": "dad4941a2b7429fc6e8206fcc4a04fc40f4a0beb"
},
"ConditionalDetrForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"ConditionalDetrImageProcessor"
],
"model_classes": [
"ConditionalDetrForObjectDetection"
],
"sha": "762c213a0285edc84eb813a2ed90063cf971ca43"
},
"ConditionalDetrModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConditionalDetrImageProcessor"
],
"model_classes": [
"ConditionalDetrModel"
],
"sha": "18b75874158cac520c63605293b06e0b1327c263"
},
"ConvBertForMaskedLM": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertForMaskedLM",
"TFConvBertForMaskedLM"
],
"sha": "307c70e32c3d3c18aeb45e0cbdc9fcd2957d9aba"
},
"ConvBertForMultipleChoice": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertForMultipleChoice",
"TFConvBertForMultipleChoice"
],
"sha": "d6561a21ffdb82d03c1822af0510eb7482ce5026"
},
"ConvBertForQuestionAnswering": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertForQuestionAnswering",
"TFConvBertForQuestionAnswering"
],
"sha": "8a056da5cc421415c2a24b9f644dd95ca279411d"
},
"ConvBertForSequenceClassification": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertForSequenceClassification",
"TFConvBertForSequenceClassification"
],
"sha": "8bb8b20e51d282d777cc567cacadd97a35f0811e"
},
"ConvBertForTokenClassification": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertForTokenClassification",
"TFConvBertForTokenClassification"
],
"sha": "8db0dd3c2b8ccc958fa9a84801f4f837b42fcf2c"
},
"ConvBertModel": {
"tokenizer_classes": [
"ConvBertTokenizer",
"ConvBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ConvBertModel",
"TFConvBertModel"
],
"sha": "c9c5b1a74f0e468d8467473cabeaa67fcdbaddb7"
},
"ConvNextBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextBackbone"
],
"sha": "499c7d6a97825b79e19663b70f3b60c4813b6bf2"
},
"ConvNextForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextForImageClassification",
"TFConvNextForImageClassification"
],
"sha": "0b490fd6b19cdbf721025dbd6ee45dcc5828e6e3"
},
"ConvNextModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextModel",
"TFConvNextModel"
],
"sha": "7b3b47a57b9a9120e022b91d6067daeac55b794f"
},
"ConvNextV2Backbone": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextV2Backbone"
],
"sha": "c82fc526949dfd892a1fee3c34be6f8d80c4d3df"
},
"ConvNextV2ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextV2ForImageClassification",
"TFConvNextV2ForImageClassification"
],
"sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc"
},
"ConvNextV2Model": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ConvNextV2Model",
"TFConvNextV2Model"
],
"sha": "c4dd68ee1102cba05bcc483da2a88e39427b7249"
},
"CvtForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"CvtForImageClassification",
"TFCvtForImageClassification"
],
"sha": "4b1938e252fdb26a06c1f5755e07fa8f6eed2d75"
},
"CvtModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"CvtModel",
"TFCvtModel"
],
"sha": "27fed12c174f4f4f1fe27075d1c29602fe0669f0"
},
"DPRQuestionEncoder": {
"tokenizer_classes": [
"DPRQuestionEncoderTokenizer",
"DPRQuestionEncoderTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DPRQuestionEncoder",
"TFDPRQuestionEncoder"
],
"sha": "09ae0269780271e0a4916f7bab1dbc4f8a76070d"
},
"DPTForDepthEstimation": {
"tokenizer_classes": [],
"processor_classes": [
"DPTImageProcessor"
],
"model_classes": [
"DPTForDepthEstimation"
],
"sha": "11b7735d64d95b6599811631b012d2dec6eaa2c1"
},
"DPTForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"DPTImageProcessor"
],
"model_classes": [
"DPTForSemanticSegmentation"
],
"sha": "e140c3c716a4bf11dad875e5f5f0abd2bd4cbbcb"
},
"DPTModel": {
"tokenizer_classes": [],
"processor_classes": [
"DPTImageProcessor"
],
"model_classes": [
"DPTModel"
],
"sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5"
},
"Data2VecAudioForAudioFrameClassification": {
"tokenizer_classes": [],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Data2VecAudioForAudioFrameClassification"
],
"sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f"
},
"Data2VecAudioForCTC": {
"tokenizer_classes": [],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Data2VecAudioForCTC"
],
"sha": "bb161b6a181bd2c22cf30222f46fa6ef42225744"
},
"Data2VecAudioForSequenceClassification": {
"tokenizer_classes": [],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Data2VecAudioForSequenceClassification"
],
"sha": "8de17e0a959eca5f72b2ea59a11bc1fa744785d9"
},
"Data2VecAudioForXVector": {
"tokenizer_classes": [],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Data2VecAudioForXVector"
],
"sha": "dcb92484cf28fb4fe1dcf5d6e8d78e04382fdce9"
},
"Data2VecAudioModel": {
"tokenizer_classes": [],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Data2VecAudioModel"
],
"sha": "73f503fdff73b7616154f64dbe38a685cc48e8eb"
},
"Data2VecTextForCausalLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForCausalLM"
],
"sha": "1f3658ce623653338cd31516551e8181aa08bb38"
},
"Data2VecTextForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForMaskedLM"
],
"sha": "fb41ac30d0faa0899bf5afaa0986df8993395ca6"
},
"Data2VecTextForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForMultipleChoice"
],
"sha": "e7556d520ad90ebae5ad88554d45a37488d00040"
},
"Data2VecTextForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForQuestionAnswering"
],
"sha": "9630833d76a1fd7e96b904d87bb11b7c00ccd021"
},
"Data2VecTextForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForSequenceClassification"
],
"sha": "156e4019c37d9592f193ba80553cd245cbccecb3"
},
"Data2VecTextForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextForTokenClassification"
],
"sha": "55b3a49fdbf22479d6eb939261d4b884ea288270"
},
"Data2VecTextModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"Data2VecTextModel"
],
"sha": "c21be3e4f88e8357bf33bfba8f8e05ae2e735124"
},
"Data2VecVisionForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"Data2VecVisionForImageClassification",
"TFData2VecVisionForImageClassification"
],
"sha": "d640e7ced7a3fbbb8c8661a4f67b934e55406172"
},
"Data2VecVisionForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"Data2VecVisionForSemanticSegmentation",
"TFData2VecVisionForSemanticSegmentation"
],
"sha": "3eba3cd694fab6530b7e5da8f49d3951301c816a"
},
"Data2VecVisionModel": {
"tokenizer_classes": [],
"processor_classes": [
"BeitImageProcessor"
],
"model_classes": [
"Data2VecVisionModel",
"TFData2VecVisionModel"
],
"sha": "2a7ad25e4359970dc70494a2f3eb98e2a3c9806d"
},
"DebertaForMaskedLM": {
"tokenizer_classes": [
"DebertaTokenizer",
"DebertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaForMaskedLM",
"TFDebertaForMaskedLM"
],
"sha": "e0f9ada9e0f6d4d7cc39d7cbd58369b0c84de33d"
},
"DebertaForQuestionAnswering": {
"tokenizer_classes": [
"DebertaTokenizer",
"DebertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaForQuestionAnswering",
"TFDebertaForQuestionAnswering"
],
"sha": "a3eb69cdb0b52f7d0fb730e882f1a54b9a7442ea"
},
"DebertaForSequenceClassification": {
"tokenizer_classes": [
"DebertaTokenizer",
"DebertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaForSequenceClassification",
"TFDebertaForSequenceClassification"
],
"sha": "32af91d12c4e9b6d62b420bee93311fd77d3c933"
},
"DebertaForTokenClassification": {
"tokenizer_classes": [
"DebertaTokenizer",
"DebertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaForTokenClassification",
"TFDebertaForTokenClassification"
],
"sha": "ba62ba2726d813e60e512476fc1b178aa3858175"
},
"DebertaModel": {
"tokenizer_classes": [
"DebertaTokenizer",
"DebertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaModel",
"TFDebertaModel"
],
"sha": "4273294e14cd04c0e2cd1dcff5cf7e5d4fe906ba"
},
"DebertaV2ForMaskedLM": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2ForMaskedLM",
"TFDebertaV2ForMaskedLM"
],
"sha": "a053dedc2cdf32918a84277cb0c05186604496a5"
},
"DebertaV2ForMultipleChoice": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2ForMultipleChoice",
"TFDebertaV2ForMultipleChoice"
],
"sha": "07e39f520ce239b39ef8cb24cd7874d06c791063"
},
"DebertaV2ForQuestionAnswering": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2ForQuestionAnswering",
"TFDebertaV2ForQuestionAnswering"
],
"sha": "9cecb3a7fc6b95099122283644ea1f8ced287d1b"
},
"DebertaV2ForSequenceClassification": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2ForSequenceClassification",
"TFDebertaV2ForSequenceClassification"
],
"sha": "df9ea1f5c0f2ccd139b21cfb3963a5a5ebfb5b81"
},
"DebertaV2ForTokenClassification": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2ForTokenClassification",
"TFDebertaV2ForTokenClassification"
],
"sha": "51fe01989df38a540ac1abca5ee71a51365defd5"
},
"DebertaV2Model": {
"tokenizer_classes": [
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DebertaV2Model",
"TFDebertaV2Model"
],
"sha": "211df4bd1a4a9b66c97af3f9231a5d2af8de7b9f"
},
"DeformableDetrForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"DeformableDetrImageProcessor"
],
"model_classes": [
"DeformableDetrForObjectDetection"
],
"sha": "8fa0db215c458f60ae4d455d6fb067c1c5e39fdc"
},
"DeformableDetrModel": {
"tokenizer_classes": [],
"processor_classes": [
"DeformableDetrImageProcessor"
],
"model_classes": [
"DeformableDetrModel"
],
"sha": "0faac5624696b03edd14694642f9804f2cd8f3da"
},
"DeiTForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"DeiTImageProcessor"
],
"model_classes": [
"DeiTForImageClassification",
"TFDeiTForImageClassification"
],
"sha": "21fc864199dafa0130f16a45769c6b6ca22c7784"
},
"DeiTForImageClassificationWithTeacher": {
"tokenizer_classes": [],
"processor_classes": [
"DeiTImageProcessor"
],
"model_classes": [
"DeiTForImageClassificationWithTeacher",
"TFDeiTForImageClassificationWithTeacher"
],
"sha": "5a5738a109e27f3d4b78a0db4cb1d3331140c10e"
},
"DeiTForMaskedImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"DeiTImageProcessor"
],
"model_classes": [
"DeiTForMaskedImageModeling",
"TFDeiTForMaskedImageModeling"
],
"sha": "d5df5c538fe1efb8d668a3893d1691d505a0de06"
},
"DeiTModel": {
"tokenizer_classes": [],
"processor_classes": [
"DeiTImageProcessor"
],
"model_classes": [
"DeiTModel",
"TFDeiTModel"
],
"sha": "0fdbff6f44b7c6933c2027fec1d7f87bec06b590"
},
"DetaForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"DetaImageProcessor"
],
"model_classes": [
"DetaForObjectDetection"
],
"sha": "a15ad6ce64fbcb5021b2b99e9587c4011ef3341d"
},
"DetaModel": {
"tokenizer_classes": [],
"processor_classes": [
"DetaImageProcessor"
],
"model_classes": [
"DetaModel"
],
"sha": "8820f2297ec0dec8f1875054559c8b7a162098e3"
},
"DetrForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"DetrImageProcessor"
],
"model_classes": [
"DetrForObjectDetection"
],
"sha": "7dc967c53f4b3f07904c42b255346b744d0ad84e"
},
"DetrForSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"DetrImageProcessor"
],
"model_classes": [
"DetrForSegmentation"
],
"sha": "e34330acdae359588ef853e961a78d419dc4e8eb"
},
"DetrModel": {
"tokenizer_classes": [],
"processor_classes": [
"DetrImageProcessor"
],
"model_classes": [
"DetrModel"
],
"sha": "f15ce38a10c7447e8048b1681e4811322a005722"
},
"DinatBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"DinatBackbone"
],
"sha": "3ba13790a0796d90104c207f75bb3d5d79723d51"
},
"DinatForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"DinatForImageClassification"
],
"sha": "624cf2d864a7ea2f90e24014a213e34597e8bd76"
},
"DinatModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"DinatModel"
],
"sha": "d6c75bc51196f0a683afb12de6310fdda13efefd"
},
"Dinov2Backbone": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"Dinov2Backbone"
],
"sha": "dbf8d2ff3092ac53c11e6525e6cbae7ace84769a"
},
"Dinov2ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"Dinov2ForImageClassification"
],
"sha": "ae44840966456aae33641df2c8c8a4af5b457b24"
},
"Dinov2Model": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"Dinov2Model"
],
"sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959"
},
"DistilBertForMaskedLM": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertForMaskedLM",
"TFDistilBertForMaskedLM"
],
"sha": "b2dfda30b012821996e6e603729562d9c900bc0f"
},
"DistilBertForMultipleChoice": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertForMultipleChoice",
"TFDistilBertForMultipleChoice"
],
"sha": "ec6b83129a7d1be2a6b8d58303abcca5541a5cb3"
},
"DistilBertForQuestionAnswering": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertForQuestionAnswering",
"TFDistilBertForQuestionAnswering"
],
"sha": "812406b226415044469b0e0a84c4fe0ff338c5d3"
},
"DistilBertForSequenceClassification": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertForSequenceClassification",
"TFDistilBertForSequenceClassification"
],
"sha": "6f427ce7b3e5aaa596938fbd98437d3875581b7b"
},
"DistilBertForTokenClassification": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertForTokenClassification",
"TFDistilBertForTokenClassification"
],
"sha": "166dbe3f5d6ecd871762567069454d6ec65234b4"
},
"DistilBertModel": {
"tokenizer_classes": [
"DistilBertTokenizer",
"DistilBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"DistilBertModel",
"TFDistilBertModel"
],
"sha": "cc4425ad0676f3ec00e8bffe485fe83cae61041a"
},
"DonutSwinModel": {
"tokenizer_classes": [],
"processor_classes": [
"DonutImageProcessor"
],
"model_classes": [
"DonutSwinModel"
],
"sha": "1b10654fbfe2f2ea410a672ab605bd5c60d3f284"
},
"EfficientFormerForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"EfficientFormerImageProcessor"
],
"model_classes": [
"EfficientFormerForImageClassification",
"TFEfficientFormerForImageClassification"
],
"sha": "ebadb628e12f268e321fcc756fa4606f7b5b3178"
},
"EfficientFormerForImageClassificationWithTeacher": {
"tokenizer_classes": [],
"processor_classes": [
"EfficientFormerImageProcessor"
],
"model_classes": [
"EfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerForImageClassificationWithTeacher"
],
"sha": "1beabce6da9cb4ebbeafcd1ef23fac36b4a269e2"
},
"EfficientFormerModel": {
"tokenizer_classes": [],
"processor_classes": [
"EfficientFormerImageProcessor"
],
"model_classes": [
"EfficientFormerModel",
"TFEfficientFormerModel"
],
"sha": "200fae5b875844d09c8a91d1c155b72b06a517f6"
},
"EfficientNetForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"EfficientNetImageProcessor"
],
"model_classes": [
"EfficientNetForImageClassification"
],
"sha": "6ed195ee636d2c0b885139da8c7b45d57ebaeee0"
},
"EfficientNetModel": {
"tokenizer_classes": [],
"processor_classes": [
"EfficientNetImageProcessor"
],
"model_classes": [
"EfficientNetModel"
],
"sha": "eb03c90d4aaad98af0f19e0dfbdc41106297ffff"
},
"ElectraForCausalLM": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForCausalLM"
],
"sha": "c78396bc8cdd8db247892339de8da80d691d1d04"
},
"ElectraForMaskedLM": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForMaskedLM",
"TFElectraForMaskedLM"
],
"sha": "631337703dbd8d41904c39891a41c6f1edd31813"
},
"ElectraForMultipleChoice": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForMultipleChoice",
"TFElectraForMultipleChoice"
],
"sha": "66fdea6e22cfcbd3caa49ea82f31871c460612fa"
},
"ElectraForPreTraining": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForPreTraining",
"TFElectraForPreTraining"
],
"sha": "7b2d0fa8726b1180c7d6cde4f4afc3800eba7e6f"
},
"ElectraForQuestionAnswering": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForQuestionAnswering",
"TFElectraForQuestionAnswering"
],
"sha": "c6b127fd9f3019462e4ca2373762836207e39ce2"
},
"ElectraForSequenceClassification": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForSequenceClassification",
"TFElectraForSequenceClassification"
],
"sha": "41f0089ab7876abe0e28dbbd565144acb31f8127"
},
"ElectraForTokenClassification": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraForTokenClassification",
"TFElectraForTokenClassification"
],
"sha": "1fdbbe70c1ddd16503820a1443d6a379a15ed777"
},
"ElectraModel": {
"tokenizer_classes": [
"ElectraTokenizer",
"ElectraTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ElectraModel",
"TFElectraModel"
],
"sha": "312b532cbef26610d80f2bd008650160cae4f7a1"
},
"EncodecModel": {
"tokenizer_classes": [],
"processor_classes": [
"EncodecFeatureExtractor"
],
"model_classes": [
"EncodecModel"
],
"sha": "e14c5a2fd6529c85cd4ac5a05ee9e550ced6a006"
},
"EncoderDecoderModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"EncoderDecoderModel",
"TFEncoderDecoderModel"
],
"sha": "1038be9fd1b87b2e0a8f33721ff8e4612d34b3b6"
},
"ErnieForCausalLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForCausalLM"
],
"sha": "b49e00112ff06c2f0a0e54499921dddcf8c3c6a8"
},
"ErnieForMaskedLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForMaskedLM"
],
"sha": "30429830d1997222d885dcfdbd36d5e02d0d34b1"
},
"ErnieForMultipleChoice": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForMultipleChoice"
],
"sha": "5a21144bf35dfb60560ff8249116ad4459c0069a"
},
"ErnieForNextSentencePrediction": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForNextSentencePrediction"
],
"sha": "ed5868efb39bf6afb29f0cf444deafcf1e50b5bc"
},
"ErnieForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForPreTraining"
],
"sha": "e4ad30d291c310fea25e6f91f91393f993513b42"
},
"ErnieForQuestionAnswering": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForQuestionAnswering"
],
"sha": "fe7c74b763f63a9fd864dad325385075df7c80c8"
},
"ErnieForSequenceClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForSequenceClassification"
],
"sha": "84e0be05fcd52f54e96a69f67a2481323a58a9db"
},
"ErnieForTokenClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieForTokenClassification"
],
"sha": "91cf62c43a5a83332552ffa2d8e5e44d63a224ea"
},
"ErnieMForMultipleChoice": {
"tokenizer_classes": [
"ErnieMTokenizer"
],
"processor_classes": [],
"model_classes": [
"ErnieMForMultipleChoice"
],
"sha": "c42ee7fcb132a323ace314c32e63c8a7d36ce18f"
},
"ErnieMForQuestionAnswering": {
"tokenizer_classes": [
"ErnieMTokenizer"
],
"processor_classes": [],
"model_classes": [
"ErnieMForQuestionAnswering"
],
"sha": "2b90dee75ca87b214f96db00002aa18244ec8e84"
},
"ErnieMForSequenceClassification": {
"tokenizer_classes": [
"ErnieMTokenizer"
],
"processor_classes": [],
"model_classes": [
"ErnieMForSequenceClassification"
],
"sha": "d8368646d8b1c67b1460af9c6ec13fd9d894cae6"
},
"ErnieMForTokenClassification": {
"tokenizer_classes": [
"ErnieMTokenizer"
],
"processor_classes": [],
"model_classes": [
"ErnieMForTokenClassification"
],
"sha": "a9e29ba60fa0b7bedc2ed26a6b9911427df1ca6b"
},
"ErnieMModel": {
"tokenizer_classes": [
"ErnieMTokenizer"
],
"processor_classes": [],
"model_classes": [
"ErnieMModel"
],
"sha": "7306eac3f38c3cf6211f0e741fdb81c6cc92bc09"
},
"ErnieModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ErnieModel"
],
"sha": "b51478a9f40e353c41be3a29ccef103dcfe22b4b"
},
"EsmForMaskedLM": {
"tokenizer_classes": [
"EsmTokenizer"
],
"processor_classes": [],
"model_classes": [
"EsmForMaskedLM",
"TFEsmForMaskedLM"
],
"sha": "b56297b6cd64b9ba7c613d0cd146f1ecbea8115e"
},
"EsmForSequenceClassification": {
"tokenizer_classes": [
"EsmTokenizer"
],
"processor_classes": [],
"model_classes": [
"EsmForSequenceClassification",
"TFEsmForSequenceClassification"
],
"sha": "cc6d7ef0a4763540d67b7a4fb31bede9a7d3f245"
},
"EsmForTokenClassification": {
"tokenizer_classes": [
"EsmTokenizer"
],
"processor_classes": [],
"model_classes": [
"EsmForTokenClassification",
"TFEsmForTokenClassification"
],
"sha": "498953f66e260b974c504abbc863ee266d6c84a9"
},
"EsmModel": {
"tokenizer_classes": [
"EsmTokenizer"
],
"processor_classes": [],
"model_classes": [
"EsmModel",
"TFEsmModel"
],
"sha": "183838263b70809310117a0761542501acf64c21"
},
"FNetForMaskedLM": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForMaskedLM"
],
"sha": "91eaae1eac894af5d96c0221ec9bcef7f1af41c8"
},
"FNetForMultipleChoice": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForMultipleChoice"
],
"sha": "c15d98d5f7a6f3ef3099b1257949bee208d5466e"
},
"FNetForNextSentencePrediction": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForNextSentencePrediction"
],
"sha": "c59440b44d07d61fc45a90ded7fc11d6f25b143d"
},
"FNetForPreTraining": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForPreTraining"
],
"sha": "c05f55ccfb2f2533babd3c6e99de7749bc8081da"
},
"FNetForQuestionAnswering": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForQuestionAnswering"
],
"sha": "47788e49dd435653fa2aa4b3ccae3572a870758e"
},
"FNetForSequenceClassification": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForSequenceClassification"
],
"sha": "a3049b896ea6c5a32c364989c3afe604ee58b9fc"
},
"FNetForTokenClassification": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetForTokenClassification"
],
"sha": "3bcdafca57d544bb81e2f7eead1e512c168582fc"
},
"FNetModel": {
"tokenizer_classes": [
"FNetTokenizer",
"FNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FNetModel"
],
"sha": "48fa66de37df126504db3b658806135eb877f505"
},
"FSMTForConditionalGeneration": {
"tokenizer_classes": [
"FSMTTokenizer"
],
"processor_classes": [],
"model_classes": [
"FSMTForConditionalGeneration"
],
"sha": "6a1a981b29c8a98c1fd31bd0ad809f5575ca6c7a"
},
"FSMTModel": {
"tokenizer_classes": [
"FSMTTokenizer"
],
"processor_classes": [],
"model_classes": [
"FSMTModel"
],
"sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c"
},
"FalconForCausalLM": {
"tokenizer_classes": [
"PreTrainedTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FalconForCausalLM"
],
"sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a"
},
"FalconForQuestionAnswering": {
"tokenizer_classes": [
"PreTrainedTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FalconForQuestionAnswering"
],
"sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788"
},
"FalconForSequenceClassification": {
"tokenizer_classes": [
"PreTrainedTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FalconForSequenceClassification"
],
"sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa"
},
"FalconForTokenClassification": {
"tokenizer_classes": [
"PreTrainedTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FalconForTokenClassification"
],
"sha": "0ea6ae548773daa6e3317fddc058957e956eebf4"
},
"FalconModel": {
"tokenizer_classes": [
"PreTrainedTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FalconModel"
],
"sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84"
},
"FlaubertForMultipleChoice": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertForMultipleChoice",
"TFFlaubertForMultipleChoice"
],
"sha": "8b12bd87a63f2e86c3482431742f6d8abf6ec4fd"
},
"FlaubertForQuestionAnsweringSimple": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertForQuestionAnsweringSimple",
"TFFlaubertForQuestionAnsweringSimple"
],
"sha": "5c0e7ad1efae7e3497f5cd6d2d9519403df49d37"
},
"FlaubertForSequenceClassification": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertForSequenceClassification",
"TFFlaubertForSequenceClassification"
],
"sha": "762f12a8c99690be8ed2663b7af3011660174a7c"
},
"FlaubertForTokenClassification": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertForTokenClassification",
"TFFlaubertForTokenClassification"
],
"sha": "d2ab741c937bb69ef27c89e4c86a8c9d444874ca"
},
"FlaubertModel": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertModel",
"TFFlaubertModel"
],
"sha": "bdc2f8e17bb869393053429ec8c1c842bfeabb07"
},
"FlaubertWithLMHeadModel": {
"tokenizer_classes": [
"FlaubertTokenizer"
],
"processor_classes": [],
"model_classes": [
"FlaubertWithLMHeadModel",
"TFFlaubertWithLMHeadModel"
],
"sha": "f20eb0932c90061003c9cc4e109c6ea22559c4f2"
},
"FlavaForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"FlavaImageProcessor"
],
"model_classes": [
"FlavaForPreTraining"
],
"sha": "6e9b2094060a5fa27984c7b49e5d0e820a88b487"
},
"FlavaModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"FlavaImageProcessor"
],
"model_classes": [
"FlavaModel"
],
"sha": "31ebf1b7a0ef1fd5059b98e28e5ab1c366d2c482"
},
"FocalNetBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"FocalNetBackbone"
],
"sha": "eb8c580969443cb87de7dd9a256deaface03692f"
},
"FocalNetForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"FocalNetForImageClassification"
],
"sha": "28d30ded26a3213e8fb7011a455afc3aa98b0a95"
},
"FocalNetForMaskedImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"FocalNetForMaskedImageModeling"
],
"sha": "0ea7626d19c9dd2f3113d977f643a1babc720bd3"
},
"FocalNetModel": {
"tokenizer_classes": [],
"processor_classes": [
"BitImageProcessor"
],
"model_classes": [
"FocalNetModel"
],
"sha": "107b004e6aa14108a359b7d22bdb9aa141ec05d5"
},
"FunnelBaseModel": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelBaseModel",
"TFFunnelBaseModel"
],
"sha": "87fed4252812df23315a56531625333e315681c6"
},
"FunnelForMaskedLM": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForMaskedLM",
"TFFunnelForMaskedLM"
],
"sha": "5543daf29f185cd45f2599bd6f38c96064c9c8de"
},
"FunnelForMultipleChoice": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForMultipleChoice",
"TFFunnelForMultipleChoice"
],
"sha": "a8bf597e37dbefb1ac5c97c4cb162c3d522a33a1"
},
"FunnelForPreTraining": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForPreTraining",
"TFFunnelForPreTraining"
],
"sha": "cbcb300d60aacd5950a45409b6e3f0f240c9082e"
},
"FunnelForQuestionAnswering": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForQuestionAnswering",
"TFFunnelForQuestionAnswering"
],
"sha": "6a5675305e096434e818486a13892cb55daffd13"
},
"FunnelForSequenceClassification": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForSequenceClassification",
"TFFunnelForSequenceClassification"
],
"sha": "1bc557a1e4314da21a44dee57b799e95a7025e5c"
},
"FunnelForTokenClassification": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelForTokenClassification",
"TFFunnelForTokenClassification"
],
"sha": "693bc1217a224efd558f410ddc8ffc63739bebc3"
},
"FunnelModel": {
"tokenizer_classes": [
"FunnelTokenizer",
"FunnelTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"FunnelModel",
"TFFunnelModel"
],
"sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76"
},
"FuyuForCausalLM": {
"tokenizer_classes": [
"LlamaTokenizerFast"
],
"processor_classes": [
"FuyuImageProcessor"
],
"model_classes": [
"FuyuForCausalLM"
],
"sha": "685d78258ea95c5c82e0e4555d0d4a2270ab8bff"
},
"GLPNForDepthEstimation": {
"tokenizer_classes": [],
"processor_classes": [
"GLPNImageProcessor"
],
"model_classes": [
"GLPNForDepthEstimation"
],
"sha": "32ca1c1ef5d33242e5e7c0433bcd773c082f0260"
},
"GLPNModel": {
"tokenizer_classes": [],
"processor_classes": [
"GLPNImageProcessor"
],
"model_classes": [
"GLPNModel"
],
"sha": "24a8dbb48b1aa0ba2eba44324fcd0c78cca64dd4"
},
"GPT2ForQuestionAnswering": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPT2ForQuestionAnswering"
],
"sha": "a5bdd6bd4d79feece85ea9a8bd4ee5fe54c1d45b"
},
"GPT2ForSequenceClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPT2ForSequenceClassification",
"TFGPT2ForSequenceClassification"
],
"sha": "90a2d78e5c7f288152f8456c3d58a43b40a58449"
},
"GPT2ForTokenClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPT2ForTokenClassification"
],
"sha": "da78bc95b45fab2da9d43f2ca27164996e31ade1"
},
"GPT2LMHeadModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPT2LMHeadModel",
"TFGPT2LMHeadModel"
],
"sha": "78f56535d4ce19e9d7c0992e390085c5a4196b37"
},
"GPT2Model": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPT2Model",
"TFGPT2Model"
],
"sha": "d6694b0d8fe17978761c9305dc151780506b192e"
},
"GPTBigCodeForCausalLM": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTBigCodeForCausalLM"
],
"sha": "99f7aaadf9c29669c63ef6c16f6bc5c07dbb9126"
},
"GPTBigCodeForSequenceClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTBigCodeForSequenceClassification"
],
"sha": "64a7398d5763161037b818314c60dd83d93d03e9"
},
"GPTBigCodeForTokenClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTBigCodeForTokenClassification"
],
"sha": "310537ecd22d45f71bf594b17922cf2abc338eaf"
},
"GPTBigCodeModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTBigCodeModel"
],
"sha": "3069419084a9dc36802d47de9df3d314ccfc2f28"
},
"GPTJForCausalLM": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTJForCausalLM",
"TFGPTJForCausalLM"
],
"sha": "1fff390baa45cb187903ebdd269c975bb9ed7386"
},
"GPTJForQuestionAnswering": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTJForQuestionAnswering",
"TFGPTJForQuestionAnswering"
],
"sha": "3d4ec61dbed01f844d4c309971eeb5ad722c6c84"
},
"GPTJForSequenceClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTJForSequenceClassification",
"TFGPTJForSequenceClassification"
],
"sha": "4b5db259cd16ca84ae2cd79aa4851cdd14479128"
},
"GPTJModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTJModel",
"TFGPTJModel"
],
"sha": "d8e1db30d08fbf57da6fc139aea3ffd63ab6226e"
},
"GPTNeoForCausalLM": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoForCausalLM"
],
"sha": "e88934e402c15195dd99b2947632415dd7645268"
},
"GPTNeoForQuestionAnswering": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoForQuestionAnswering"
],
"sha": "623883e94bd08caf9b3f839b98debeea72d5bc2b"
},
"GPTNeoForSequenceClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoForSequenceClassification"
],
"sha": "bf2090d5d91a70eb37ba51fbdcf23afc7031fea8"
},
"GPTNeoForTokenClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoForTokenClassification"
],
"sha": "d5208e73e24a1671219776b50fe5f96e0e4cd218"
},
"GPTNeoModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoModel"
],
"sha": "72a7cd49da613c3125a90884df4763545c594e56"
},
"GPTNeoXForCausalLM": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXForCausalLM"
],
"sha": "0229cfaaa843c6b492ac2abffabb00f1ff1936f8"
},
"GPTNeoXForQuestionAnswering": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXForQuestionAnswering"
],
"sha": "7d2f08c959c211129952ee03b5562add09fe6864"
},
"GPTNeoXForSequenceClassification": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXForSequenceClassification"
],
"sha": "17c4b845ee2e0bb780ca2dea2d59a3d9d5d3c651"
},
"GPTNeoXForTokenClassification": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXForTokenClassification"
],
"sha": "3aa4fe8a562f32230041d6d3616aa5ecc3f30192"
},
"GPTNeoXJapaneseForCausalLM": {
"tokenizer_classes": [
"GPTNeoXJapaneseTokenizer"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXJapaneseForCausalLM"
],
"sha": "5fca2479f1064fd22e17f944c8fcc14f7e73f1d5"
},
"GPTNeoXJapaneseModel": {
"tokenizer_classes": [
"GPTNeoXJapaneseTokenizer"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXJapaneseModel"
],
"sha": "5c6ed124150df845cfc701d70b97fdcde687be52"
},
"GPTNeoXModel": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"GPTNeoXModel"
],
"sha": "33114ba2f72189d5a2bd63f0cdb78551189242ff"
},
"GPTSanJapaneseForConditionalGeneration": {
"tokenizer_classes": [
"GPTSanJapaneseTokenizer"
],
"processor_classes": [],
"model_classes": [
"GPTSanJapaneseForConditionalGeneration"
],
"sha": "ff6a41faaa713c7fbd5d9a1a50539745f9e1178e"
},
"GitForCausalLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"GitForCausalLM"
],
"sha": "60f9c50466ae0beeb11776ca5bfeb6473f441554"
},
"GitModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"GitModel"
],
"sha": "3d2eb6bddf95bb4a4e59b045d4e464c730c07f41"
},
"GroupViTModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"GroupViTModel",
"TFGroupViTModel"
],
"sha": "05a3a02dd46cb9eb078608dec98f633c0cf559ef"
},
"HubertForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"HubertForCTC"
],
"sha": "13431b76106f993eedcff48a75bae590a09b14f7"
},
"HubertForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"HubertForSequenceClassification"
],
"sha": "d23f46607a900b1a55dfee4b7ed205a6823035b1"
},
"HubertModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"HubertModel",
"TFHubertModel"
],
"sha": "3224562c86c4669db65ae7defdc5fb555b113e95"
},
"IBertForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertForMaskedLM"
],
"sha": "e333a9c9d375f4d839b7e9e21d1a1c8dad58d7d1"
},
"IBertForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertForMultipleChoice"
],
"sha": "a81f7d64cd7ce5fe6cd726b23d9d14ac5d17bf53"
},
"IBertForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertForQuestionAnswering"
],
"sha": "7b66d13d4d6801a82cbeb7f9fd853ca1630d1f8b"
},
"IBertForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertForSequenceClassification"
],
"sha": "309d57145c40f889222fe5df62f14dddf4496b38"
},
"IBertForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertForTokenClassification"
],
"sha": "b032e9bff4b081b78c098b2d8bc610ac035c6ddf"
},
"IBertModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"IBertModel"
],
"sha": "6749164c678d4883d455f98b1dfc98c62da8f08b"
},
"IdeficsForVisionText2Text": {
"tokenizer_classes": [
"LlamaTokenizerFast"
],
"processor_classes": [
"IdeficsImageProcessor"
],
"model_classes": [
"IdeficsForVisionText2Text"
],
"sha": "2c2f2e2cd6b02a77d0cdd8c3767ba9a6267dbd20"
},
"IdeficsModel": {
"tokenizer_classes": [
"LlamaTokenizerFast"
],
"processor_classes": [
"IdeficsImageProcessor"
],
"model_classes": [
"IdeficsModel"
],
"sha": "649df2e35e067efd573ff2d083784a5cf876545e"
},
"ImageGPTForCausalImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"ImageGPTImageProcessor"
],
"model_classes": [
"ImageGPTForCausalImageModeling"
],
"sha": "9a7d1fc04439ab1d9d690de9c3e7673f08568cdf"
},
"ImageGPTForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ImageGPTImageProcessor"
],
"model_classes": [
"ImageGPTForImageClassification"
],
"sha": "d92c7aed4ba5de74a1f542b736010090e4a58b42"
},
"ImageGPTModel": {
"tokenizer_classes": [],
"processor_classes": [
"ImageGPTImageProcessor"
],
"model_classes": [
"ImageGPTModel"
],
"sha": "5a7983e48d5841704733dd0756177680ed50c074"
},
"Kosmos2ForConditionalGeneration": {
"tokenizer_classes": [
"XLMRobertaTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"Kosmos2ForConditionalGeneration"
],
"sha": "d1d4607782b911411676f1ee79997dee645def58"
},
"Kosmos2Model": {
"tokenizer_classes": [
"XLMRobertaTokenizerFast"
],
"processor_classes": [
"CLIPImageProcessor"
],
"model_classes": [
"Kosmos2Model"
],
"sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e"
},
"LEDForConditionalGeneration": {
"tokenizer_classes": [
"LEDTokenizer",
"LEDTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LEDForConditionalGeneration",
"TFLEDForConditionalGeneration"
],
"sha": "a354b49a79351f3ea8ae7776d9f8352ae26cfc14"
},
"LEDForQuestionAnswering": {
"tokenizer_classes": [
"LEDTokenizer",
"LEDTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LEDForQuestionAnswering"
],
"sha": "47c7a75a1e650dae60ff6e9bbab0f2386946670c"
},
"LEDForSequenceClassification": {
"tokenizer_classes": [
"LEDTokenizer",
"LEDTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LEDForSequenceClassification"
],
"sha": "3571e2c9d9f2f2ec0b8fe47090330b128be05126"
},
"LEDModel": {
"tokenizer_classes": [
"LEDTokenizer",
"LEDTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LEDModel",
"TFLEDModel"
],
"sha": "3c3f6eb142545afc570187bfdabfe65d43dafbe4"
},
"LayoutLMForMaskedLM": {
"tokenizer_classes": [
"LayoutLMTokenizer",
"LayoutLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LayoutLMForMaskedLM",
"TFLayoutLMForMaskedLM"
],
"sha": "0368bd9bd8fd3eb43b8a3b38962b5345b8765514"
},
"LayoutLMForQuestionAnswering": {
"tokenizer_classes": [
"LayoutLMTokenizer",
"LayoutLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LayoutLMForQuestionAnswering",
"TFLayoutLMForQuestionAnswering"
],
"sha": "0d6a4bc614fccfa313c1fb6d132a250929518f85"
},
"LayoutLMForSequenceClassification": {
"tokenizer_classes": [
"LayoutLMTokenizer",
"LayoutLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LayoutLMForSequenceClassification",
"TFLayoutLMForSequenceClassification"
],
"sha": "1bd68c73dbf6c8c0526d24fbe2831be82998c440"
},
"LayoutLMForTokenClassification": {
"tokenizer_classes": [
"LayoutLMTokenizer",
"LayoutLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LayoutLMForTokenClassification",
"TFLayoutLMForTokenClassification"
],
"sha": "155e7da3f1d786aa39d957b16080c52de4a7efd7"
},
"LayoutLMModel": {
"tokenizer_classes": [
"LayoutLMTokenizer",
"LayoutLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LayoutLMModel",
"TFLayoutLMModel"
],
"sha": "14f77b30d267910f11f0fd532a91a6b85ab3a4de"
},
"LayoutLMv2ForQuestionAnswering": {
"tokenizer_classes": [
"LayoutLMv2Tokenizer",
"LayoutLMv2TokenizerFast"
],
"processor_classes": [
"LayoutLMv2ImageProcessor"
],
"model_classes": [
"LayoutLMv2ForQuestionAnswering"
],
"sha": "f452e28dd34d3c38cce046b1cc7b0ada69f587b1"
},
"LayoutLMv2ForSequenceClassification": {
"tokenizer_classes": [
"LayoutLMv2Tokenizer",
"LayoutLMv2TokenizerFast"
],
"processor_classes": [
"LayoutLMv2ImageProcessor"
],
"model_classes": [
"LayoutLMv2ForSequenceClassification"
],
"sha": "b483e08fd143113629ecda3dbfd57e69bfeb5f11"
},
"LayoutLMv2ForTokenClassification": {
"tokenizer_classes": [
"LayoutLMv2Tokenizer",
"LayoutLMv2TokenizerFast"
],
"processor_classes": [
"LayoutLMv2ImageProcessor"
],
"model_classes": [
"LayoutLMv2ForTokenClassification"
],
"sha": "0721ae69bff00ecfff1b3d1521a475cde0253299"
},
"LayoutLMv2Model": {
"tokenizer_classes": [
"LayoutLMv2Tokenizer",
"LayoutLMv2TokenizerFast"
],
"processor_classes": [
"LayoutLMv2ImageProcessor"
],
"model_classes": [
"LayoutLMv2Model"
],
"sha": "6a1b510769b344979a910a7d0bade613a9ec2dfc"
},
"LayoutLMv3ForQuestionAnswering": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [
"LayoutLMv3ImageProcessor"
],
"model_classes": [
"LayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForQuestionAnswering"
],
"sha": "4640242388e69cf77ea2dd3ac36ec6f1b26628c8"
},
"LayoutLMv3ForSequenceClassification": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [
"LayoutLMv3ImageProcessor"
],
"model_classes": [
"LayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForSequenceClassification"
],
"sha": "96515f699874cfbfbec7a64c539ae92419e4c6dc"
},
"LayoutLMv3ForTokenClassification": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [
"LayoutLMv3ImageProcessor"
],
"model_classes": [
"LayoutLMv3ForTokenClassification",
"TFLayoutLMv3ForTokenClassification"
],
"sha": "ed4ffc464f2028fe50dfc6823f4eda78d34be7e6"
},
"LayoutLMv3Model": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [
"LayoutLMv3ImageProcessor"
],
"model_classes": [
"LayoutLMv3Model",
"TFLayoutLMv3Model"
],
"sha": "69725e5e2445e5c1c3aa8a2aa49cfd72e0a44565"
},
"LevitForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"LevitImageProcessor"
],
"model_classes": [
"LevitForImageClassification"
],
"sha": "5ae8ccaa1fe1c947cb8ae6499e4a150c668bb9f0"
},
"LevitForImageClassificationWithTeacher": {
"tokenizer_classes": [],
"processor_classes": [
"LevitImageProcessor"
],
"model_classes": [
"LevitForImageClassificationWithTeacher"
],
"sha": "568cc0d965b9bd293f240e7724314db6d50f6722"
},
"LevitModel": {
"tokenizer_classes": [],
"processor_classes": [
"LevitImageProcessor"
],
"model_classes": [
"LevitModel"
],
"sha": "172efa52b50c75c3b3e498fa638f55e65b2ebf87"
},
"LiltForQuestionAnswering": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LiltForQuestionAnswering"
],
"sha": "0a348441999e98ec003b29fc4d5a67ad22ee6ca2"
},
"LiltForSequenceClassification": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LiltForSequenceClassification"
],
"sha": "c53ab0ba33536fe564a4a1e4f1674d990c01b83a"
},
"LiltForTokenClassification": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LiltForTokenClassification"
],
"sha": "14f85076f9b3f7016917e324d51ebd22511a2ae5"
},
"LiltModel": {
"tokenizer_classes": [
"LayoutLMv3Tokenizer",
"LayoutLMv3TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LiltModel"
],
"sha": "3f1166cc14c532388df7e82336a8e575a813bd3f"
},
"LongT5ForConditionalGeneration": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongT5ForConditionalGeneration"
],
"sha": "c685cbbe706ad5c9a28689631765726a1874dcc7"
},
"LongT5Model": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongT5Model"
],
"sha": "6b468e55e2490565e6155690201086ac00c72062"
},
"LongformerForMaskedLM": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerForMaskedLM",
"TFLongformerForMaskedLM"
],
"sha": "929d3bda9a1485d9bae41f9dbfc1d149c1c4e78e"
},
"LongformerForMultipleChoice": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerForMultipleChoice",
"TFLongformerForMultipleChoice"
],
"sha": "60b1ecac6b9385ce18c7e6978ab161cce8e7f9d4"
},
"LongformerForQuestionAnswering": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerForQuestionAnswering",
"TFLongformerForQuestionAnswering"
],
"sha": "be45ab1321b703f2200cbbcae560aaf2e2afef88"
},
"LongformerForSequenceClassification": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerForSequenceClassification",
"TFLongformerForSequenceClassification"
],
"sha": "8bc0de0b0f740bf397eb2770ec3ce3a24f3d7af9"
},
"LongformerForTokenClassification": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerForTokenClassification",
"TFLongformerForTokenClassification"
],
"sha": "efa33a9b6f47f0f7979af08ae8d04a5a7363a14b"
},
"LongformerModel": {
"tokenizer_classes": [
"LongformerTokenizer",
"LongformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LongformerModel",
"TFLongformerModel"
],
"sha": "b023d531688e8655fc09300ac36742588efb3240"
},
"LukeForMaskedLM": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeForMaskedLM"
],
"sha": "954cf6cd2bf1f298a3956b10c36656c57387506d"
},
"LukeForMultipleChoice": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeForMultipleChoice"
],
"sha": "d1310a9174ad50d60b30ad6049e165deb2539034"
},
"LukeForQuestionAnswering": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeForQuestionAnswering"
],
"sha": "3ea38da4e32cb4e45bea82b2e81a8639aeba2c35"
},
"LukeForSequenceClassification": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeForSequenceClassification"
],
"sha": "b5b11248aeb4f5976379d15a977aeb2677e0c0f9"
},
"LukeForTokenClassification": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeForTokenClassification"
],
"sha": "8aab1a33ad26a344a6f4dfd68630e9661e174471"
},
"LukeModel": {
"tokenizer_classes": [
"LukeTokenizer"
],
"processor_classes": [],
"model_classes": [
"LukeModel"
],
"sha": "ae23a674e7297d41f33c9af86e039757dfd2d531"
},
"LxmertForPreTraining": {
"tokenizer_classes": [
"LxmertTokenizer",
"LxmertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LxmertForPreTraining",
"TFLxmertForPreTraining"
],
"sha": "7b0843403c187aef00f20d5087086468d9613d2c"
},
"LxmertForQuestionAnswering": {
"tokenizer_classes": [
"LxmertTokenizer",
"LxmertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LxmertForQuestionAnswering"
],
"sha": "27a74bd2cd156e46656c43ceb432c4deda0df5c1"
},
"LxmertModel": {
"tokenizer_classes": [
"LxmertTokenizer",
"LxmertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"LxmertModel",
"TFLxmertModel"
],
"sha": "97612a0d6b14406ea9bfd7672e6974e0961cbef1"
},
"M2M100ForConditionalGeneration": {
"tokenizer_classes": [
"M2M100Tokenizer"
],
"processor_classes": [],
"model_classes": [
"M2M100ForConditionalGeneration"
],
"sha": "32ac347092d51f658b41ffc111b67d49acdeab46"
},
"M2M100Model": {
"tokenizer_classes": [
"M2M100Tokenizer"
],
"processor_classes": [],
"model_classes": [
"M2M100Model"
],
"sha": "e95c2ae168c7ba19f8114def40e1b1edd953b2f5"
},
"MBartForCausalLM": {
"tokenizer_classes": [
"MBartTokenizer",
"MBartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MBartForCausalLM"
],
"sha": "a45044f8056328d20a764356eca3d0746a7a195e"
},
"MBartForConditionalGeneration": {
"tokenizer_classes": [
"MBartTokenizer",
"MBartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MBartForConditionalGeneration",
"TFMBartForConditionalGeneration"
],
"sha": "171e918962d6c0ee56c6b070858e19e16c8dd09f"
},
"MBartForQuestionAnswering": {
"tokenizer_classes": [
"MBartTokenizer",
"MBartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MBartForQuestionAnswering"
],
"sha": "1ee08565d24777335595e0d2940e454abdcff731"
},
"MBartForSequenceClassification": {
"tokenizer_classes": [
"MBartTokenizer",
"MBartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MBartForSequenceClassification"
],
"sha": "53e9c88ecfa2475d27afe099ffa7a8bcdb7ef7e4"
},
"MBartModel": {
"tokenizer_classes": [
"MBartTokenizer",
"MBartTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MBartModel",
"TFMBartModel"
],
"sha": "2d492b34d69dd63b411990d5c8bb692fd637e91c"
},
"MCTCTForCTC": {
"tokenizer_classes": [],
"processor_classes": [
"MCTCTFeatureExtractor"
],
"model_classes": [
"MCTCTForCTC"
],
"sha": "895a3d74f87b344b1f0a71eae4f085941d51b5cf"
},
"MCTCTModel": {
"tokenizer_classes": [],
"processor_classes": [
"MCTCTFeatureExtractor"
],
"model_classes": [
"MCTCTModel"
],
"sha": "ce73d5c2b6fe163de778697d7b0543bf00d7ffa8"
},
"MPNetForMaskedLM": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetForMaskedLM",
"TFMPNetForMaskedLM"
],
"sha": "50af96e7d0202aef86e396c136e4c4fde8afe183"
},
"MPNetForMultipleChoice": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetForMultipleChoice",
"TFMPNetForMultipleChoice"
],
"sha": "af4ff8bf296a3a51f5ab6cd9f56741e4c732487c"
},
"MPNetForQuestionAnswering": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetForQuestionAnswering",
"TFMPNetForQuestionAnswering"
],
"sha": "3e1a25c0d3243f78f81580c312ada3b39c06b428"
},
"MPNetForSequenceClassification": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetForSequenceClassification",
"TFMPNetForSequenceClassification"
],
"sha": "43da45c0a0d73c5a5567b4c7ec512ec5023e52dd"
},
"MPNetForTokenClassification": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetForTokenClassification",
"TFMPNetForTokenClassification"
],
"sha": "4e825eff24df533321ebab823eb66ce67e4ab3d9"
},
"MPNetModel": {
"tokenizer_classes": [
"MPNetTokenizer",
"MPNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MPNetModel",
"TFMPNetModel"
],
"sha": "847c68344c2922e9a71fa8835b87a0f6f72b9f47"
},
"MarianForCausalLM": {
"tokenizer_classes": [
"MarianTokenizer"
],
"processor_classes": [],
"model_classes": [],
"sha": "5fb205e6db8e18e3c6cdd4e4709be292ba4599f3"
},
"MarianMTModel": {
"tokenizer_classes": [
"MarianTokenizer"
],
"processor_classes": [],
"model_classes": [
"MarianMTModel",
"TFMarianMTModel"
],
"sha": "0405f542b31561592231a86e3009d05256cbf49f"
},
"MarianModel": {
"tokenizer_classes": [
"MarianTokenizer"
],
"processor_classes": [],
"model_classes": [
"MarianModel",
"TFMarianModel"
],
"sha": "3649748c0286c6d5179a7013a716f7314db182a8"
},
"MarkupLMForQuestionAnswering": {
"tokenizer_classes": [
"MarkupLMTokenizer",
"MarkupLMTokenizerFast"
],
"processor_classes": [
"MarkupLMFeatureExtractor"
],
"model_classes": [
"MarkupLMForQuestionAnswering"
],
"sha": "c8bb9f93591d980362547b0bdca9f23ace2f383e"
},
"MarkupLMForSequenceClassification": {
"tokenizer_classes": [
"MarkupLMTokenizer",
"MarkupLMTokenizerFast"
],
"processor_classes": [
"MarkupLMFeatureExtractor"
],
"model_classes": [
"MarkupLMForSequenceClassification"
],
"sha": "c2cb7245d68d76e0a5f993fc8a3de099ecebc68b"
},
"MarkupLMForTokenClassification": {
"tokenizer_classes": [
"MarkupLMTokenizer",
"MarkupLMTokenizerFast"
],
"processor_classes": [
"MarkupLMFeatureExtractor"
],
"model_classes": [
"MarkupLMForTokenClassification"
],
"sha": "b9f924e82f400de0b34b46ee4ba276d686bd4890"
},
"MarkupLMModel": {
"tokenizer_classes": [
"MarkupLMTokenizer",
"MarkupLMTokenizerFast"
],
"processor_classes": [
"MarkupLMFeatureExtractor"
],
"model_classes": [
"MarkupLMModel"
],
"sha": "9687ba29f1c59d978e3d4b0fa702031f88eff53b"
},
"Mask2FormerForUniversalSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"Mask2FormerImageProcessor"
],
"model_classes": [
"Mask2FormerForUniversalSegmentation"
],
"sha": "6429a7349527c9ef140ae691b83c47702cce1bc0"
},
"Mask2FormerModel": {
"tokenizer_classes": [],
"processor_classes": [
"Mask2FormerImageProcessor"
],
"model_classes": [
"Mask2FormerModel"
],
"sha": "9bee8709204024b3669d503cdfe8890182f2a075"
},
"MaskFormerForInstanceSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"MaskFormerImageProcessor"
],
"model_classes": [
"MaskFormerForInstanceSegmentation"
],
"sha": "f844aaa81f55cb199c115f1bf95c217a70685570"
},
"MaskFormerModel": {
"tokenizer_classes": [],
"processor_classes": [
"MaskFormerImageProcessor"
],
"model_classes": [
"MaskFormerModel"
],
"sha": "473b54a464bc0ccee29bc23b4f6610f32eec05af"
},
"MegaForCausalLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForCausalLM"
],
"sha": "6642b9da860f8b62abcfb0660feabcebf6698418"
},
"MegaForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForMaskedLM"
],
"sha": "6b2d47ba03bec9e6f7eefdd4a67351fa191aae6f"
},
"MegaForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForMultipleChoice"
],
"sha": "2b1e751da36a4410473eef07a62b09227a26d504"
},
"MegaForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForQuestionAnswering"
],
"sha": "612acd9a53c351c42514adb3c04f2057d2870be7"
},
"MegaForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForSequenceClassification"
],
"sha": "4871572da1613b7e9cfd3640c6d1129af004eefb"
},
"MegaForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaForTokenClassification"
],
"sha": "450d3722c3b995215d06b9c12544c99f958581c7"
},
"MegaModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegaModel"
],
"sha": "ca0862db27428893fe22f9bb5d2eb0875c2156f3"
},
"MegatronBertForCausalLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForCausalLM"
],
"sha": "ff08d05ef8f98fdccf1f01560ec6ec4adbc8a3e3"
},
"MegatronBertForMaskedLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForMaskedLM"
],
"sha": "2ed25e2681d26b51b404ef1347a385c5f2c86a9a"
},
"MegatronBertForMultipleChoice": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForMultipleChoice"
],
"sha": "1485af4b75f8f234d2b4b5aea50ab2ec55223a15"
},
"MegatronBertForNextSentencePrediction": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForNextSentencePrediction"
],
"sha": "52bc9ee1d5145344f66b088ed278f07ed3d90584"
},
"MegatronBertForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForPreTraining"
],
"sha": "e580d0efd54e1c92789e39b32929234e36ee427f"
},
"MegatronBertForQuestionAnswering": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForQuestionAnswering"
],
"sha": "7342ba042a3c30c15382d00fcb0521533fc43841"
},
"MegatronBertForSequenceClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForSequenceClassification"
],
"sha": "6a7cd480511d817a1e221c8f7558c55a93baed1b"
},
"MegatronBertForTokenClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertForTokenClassification"
],
"sha": "8b5334b6ec5f025293ca861de474b57ca84bc005"
},
"MegatronBertModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MegatronBertModel"
],
"sha": "f2457fbe535ba97ea13db049f53618b42e13f047"
},
"MgpstrForSceneTextRecognition": {
"tokenizer_classes": [],
"processor_classes": [
"MgpstrProcessor"
],
"model_classes": [
"MgpstrForSceneTextRecognition"
],
"sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a"
},
"MistralForCausalLM": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MistralForCausalLM"
],
"sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b"
},
"MistralForSequenceClassification": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MistralForSequenceClassification"
],
"sha": "65045444ea1933309270d8b08b21d3fa94a84290"
},
"MistralModel": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MistralModel"
],
"sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac"
},
"MobileBertForMaskedLM": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForMaskedLM",
"TFMobileBertForMaskedLM"
],
"sha": "d689e737d73ad23aed3aabd3177591fc827d1c62"
},
"MobileBertForMultipleChoice": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForMultipleChoice",
"TFMobileBertForMultipleChoice"
],
"sha": "403d1f88be7eb0c769ff3a8e57eab21cc3e75afb"
},
"MobileBertForNextSentencePrediction": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForNextSentencePrediction",
"TFMobileBertForNextSentencePrediction"
],
"sha": "b4d8836a0f259ee3bca9f230093836c9117c5e4d"
},
"MobileBertForPreTraining": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForPreTraining",
"TFMobileBertForPreTraining"
],
"sha": "fbaa13ea6f9fcebb9fde620dd009d12510440d17"
},
"MobileBertForQuestionAnswering": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForQuestionAnswering",
"TFMobileBertForQuestionAnswering"
],
"sha": "ba6a55cf2daec55bfb220c9bab0bc4ad96510087"
},
"MobileBertForSequenceClassification": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForSequenceClassification",
"TFMobileBertForSequenceClassification"
],
"sha": "17ab35603bec351457e035eef2d0426538071f72"
},
"MobileBertForTokenClassification": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertForTokenClassification",
"TFMobileBertForTokenClassification"
],
"sha": "dee83e820e6c4f069886a5d1875bf6775897313e"
},
"MobileBertModel": {
"tokenizer_classes": [
"MobileBertTokenizer",
"MobileBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MobileBertModel",
"TFMobileBertModel"
],
"sha": "09b2db33ea798a762eeaf7e727e95f9ea8a6d14f"
},
"MobileNetV1ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"MobileNetV1ImageProcessor"
],
"model_classes": [
"MobileNetV1ForImageClassification"
],
"sha": "55023dbd0935f147bf1bccf960cea01ca07e0f0c"
},
"MobileNetV1Model": {
"tokenizer_classes": [],
"processor_classes": [
"MobileNetV1ImageProcessor"
],
"model_classes": [
"MobileNetV1Model"
],
"sha": "178bd24528147a028938d6ee5c7e65c969ea37b0"
},
"MobileNetV2ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"MobileNetV2ImageProcessor"
],
"model_classes": [
"MobileNetV2ForImageClassification"
],
"sha": "ff907f740cf9ea91bc3cdf403a94ae28fbb2548a"
},
"MobileNetV2ForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"MobileNetV2ImageProcessor"
],
"model_classes": [
"MobileNetV2ForSemanticSegmentation"
],
"sha": "48adbc340e42882f52b54d4f5dd045e16e9ef2d6"
},
"MobileNetV2Model": {
"tokenizer_classes": [],
"processor_classes": [
"MobileNetV2ImageProcessor"
],
"model_classes": [
"MobileNetV2Model"
],
"sha": "e876885828825472a80ef1796d89d60b901813ba"
},
"MobileViTForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTForImageClassification",
"TFMobileViTForImageClassification"
],
"sha": "7d0b31864f856e00f9e34e8c6781dcc7a8cdaf1e"
},
"MobileViTForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTForSemanticSegmentation",
"TFMobileViTForSemanticSegmentation"
],
"sha": "215f727caa3c3fc94fa4df486aa706e5d99d4194"
},
"MobileViTModel": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTModel",
"TFMobileViTModel"
],
"sha": "b3a1452e7cb44b600b21ee14f3d5382366855a46"
},
"MobileViTV2ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTV2ForImageClassification"
],
"sha": "25752b0967ad594341d1b685401450d7f698433c"
},
"MobileViTV2ForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTV2ForSemanticSegmentation"
],
"sha": "13b953f50be33219d55a12f1098be38b88000897"
},
"MobileViTV2Model": {
"tokenizer_classes": [],
"processor_classes": [
"MobileViTImageProcessor"
],
"model_classes": [
"MobileViTV2Model"
],
"sha": "2f46357659db2d6d54d870e28073deeea1c8cb64"
},
"MptForCausalLM": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MptForCausalLM"
],
"sha": "500c869b956c65f6b1a7b4867727f124c6f5728a"
},
"MptForQuestionAnswering": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MptForQuestionAnswering"
],
"sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9"
},
"MptForSequenceClassification": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MptForSequenceClassification"
],
"sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a"
},
"MptForTokenClassification": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MptForTokenClassification"
],
"sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246"
},
"MptModel": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MptModel"
],
"sha": "ea747f234556661b0c8b84a626f267066ce586bf"
},
"MraForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraForMaskedLM"
],
"sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c"
},
"MraForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraForMultipleChoice"
],
"sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74"
},
"MraForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraForQuestionAnswering"
],
"sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0"
},
"MraForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraForSequenceClassification"
],
"sha": "f47672d3708508bda7774215bee44a92ec16ab2f"
},
"MraForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraForTokenClassification"
],
"sha": "f0961ab5818bca473607fb94b391c186dc1d3492"
},
"MraModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MraModel"
],
"sha": "315f34f30bcc4b0b66b11987726df2a80c50e271"
},
"MusicgenForCausalLM": {
"tokenizer_classes": [
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [],
"sha": "f67d387eaaa7c71ddf88af95eda4bf14ace08d49"
},
"MusicgenForConditionalGeneration": {
"tokenizer_classes": [
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MusicgenForConditionalGeneration"
],
"sha": "16102cdf580e70cf0b4e0e2cda5bc75b934da92c"
},
"MvpForCausalLM": {
"tokenizer_classes": [
"MvpTokenizer",
"MvpTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MvpForCausalLM"
],
"sha": "105e5f2c8a0f20d404cb71795539cda5dd49716d"
},
"MvpForConditionalGeneration": {
"tokenizer_classes": [
"MvpTokenizer",
"MvpTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MvpForConditionalGeneration"
],
"sha": "b0b706f14b2f8aae288cba30ae0064e0be7e888b"
},
"MvpForQuestionAnswering": {
"tokenizer_classes": [
"MvpTokenizer",
"MvpTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MvpForQuestionAnswering"
],
"sha": "82f152b36a40a4c22edcb146e6eaec636d84fa2d"
},
"MvpForSequenceClassification": {
"tokenizer_classes": [
"MvpTokenizer",
"MvpTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MvpForSequenceClassification"
],
"sha": "506b68544d064001929ee9e6db3752e62972a6aa"
},
"MvpModel": {
"tokenizer_classes": [
"MvpTokenizer",
"MvpTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"MvpModel"
],
"sha": "3f4653184721a2bc029b27706d335ef7ddd219d5"
},
"NatBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"NatBackbone"
],
"sha": "d5cc5eccba4da609c82e9f5c649301b9f9fee9fb"
},
"NatForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"NatForImageClassification"
],
"sha": "2ff4c9e73c49c392c02a467e87b5511fd924242a"
},
"NatModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"NatModel"
],
"sha": "75e9756bb94d0ccdce98a8e963eeecbc66f9d573"
},
"NezhaForMaskedLM": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForMaskedLM"
],
"sha": "5991cca4b78f0ed7299259a71f3eeed3f3452b72"
},
"NezhaForMultipleChoice": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForMultipleChoice"
],
"sha": "0f6e9ec791d85ad4503acdec50b3a120f984016b"
},
"NezhaForNextSentencePrediction": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForNextSentencePrediction"
],
"sha": "9a34316c14ec8ecc98ff08e46760915c80098a57"
},
"NezhaForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForPreTraining"
],
"sha": "6259db427a0073061de352ea819d38a74798edd7"
},
"NezhaForQuestionAnswering": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForQuestionAnswering"
],
"sha": "31c6a34e85ae8c41294e0f4ef25044e00e511c4d"
},
"NezhaForSequenceClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForSequenceClassification"
],
"sha": "db057c308ba2e05f223404de11e1816ce4bd62a9"
},
"NezhaForTokenClassification": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaForTokenClassification"
],
"sha": "235f4e10b4a59709650c2bece3e342ec153d9cfc"
},
"NezhaModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NezhaModel"
],
"sha": "80e05ba7c55bcdd7f4d1387ef9a09a7a8e95b5ac"
},
"NllbMoeForConditionalGeneration": {
"tokenizer_classes": [
"NllbTokenizer",
"NllbTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NllbMoeForConditionalGeneration"
],
"sha": "2a7f87dffe826af3d52086888f3f3773246e5528"
},
"NllbMoeModel": {
"tokenizer_classes": [
"NllbTokenizer",
"NllbTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NllbMoeModel"
],
"sha": "9f7a2261eed4658e1aa5623be4672ba64bee7da5"
},
"NystromformerForMaskedLM": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerForMaskedLM"
],
"sha": "37036847783f1e65e81ecd43803270a1ecb276f3"
},
"NystromformerForMultipleChoice": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerForMultipleChoice"
],
"sha": "42a077d5ab6830e20560466eaccc525eff10c3ae"
},
"NystromformerForQuestionAnswering": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerForQuestionAnswering"
],
"sha": "1cfaf79051731824db4f09989f093f87f4fceec5"
},
"NystromformerForSequenceClassification": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerForSequenceClassification"
],
"sha": "d75231203066df41e9b6b25dbee9ad40e8515c18"
},
"NystromformerForTokenClassification": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerForTokenClassification"
],
"sha": "5a499dc96e106bf41fc9166f2ad06527ec7ca14e"
},
"NystromformerModel": {
"tokenizer_classes": [
"AlbertTokenizer",
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"NystromformerModel"
],
"sha": "2b6adb37ec473b15d71e2eb459acea08df6940ce"
},
"OPTForCausalLM": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OPTForCausalLM",
"TFOPTForCausalLM"
],
"sha": "190d1f4fc0011d2eaeaa05282e0fbd2445e4b11f"
},
"OPTForQuestionAnswering": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OPTForQuestionAnswering"
],
"sha": "0fa9277ce10dbc3d0922b354befb684a136af00b"
},
"OPTForSequenceClassification": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OPTForSequenceClassification"
],
"sha": "784ab288ab7280b1853ee400ef10ee2a965df352"
},
"OPTModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OPTModel",
"TFOPTModel"
],
"sha": "901d92b8f51edb0ec9614cb185fb66a8b5d364c3"
},
"OneFormerForUniversalSegmentation": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"OneFormerImageProcessor"
],
"model_classes": [
"OneFormerForUniversalSegmentation"
],
"sha": "fee1cfd676acc40f09017702ddac6504f3090d14"
},
"OneFormerModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"OneFormerImageProcessor"
],
"model_classes": [
"OneFormerModel"
],
"sha": "4163a79328c78f93ec57942598698a138c19a577"
},
"OpenAIGPTForSequenceClassification": {
"tokenizer_classes": [
"OpenAIGPTTokenizer",
"OpenAIGPTTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OpenAIGPTForSequenceClassification",
"TFOpenAIGPTForSequenceClassification"
],
"sha": "c513f7f952935085f7573bf70a1ac3ad8f33434c"
},
"OpenAIGPTLMHeadModel": {
"tokenizer_classes": [
"OpenAIGPTTokenizer",
"OpenAIGPTTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OpenAIGPTLMHeadModel",
"TFOpenAIGPTLMHeadModel"
],
"sha": "33f59ecd860f7a998483ec7631fe32d257235461"
},
"OpenAIGPTModel": {
"tokenizer_classes": [
"OpenAIGPTTokenizer",
"OpenAIGPTTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"OpenAIGPTModel",
"TFOpenAIGPTModel"
],
"sha": "00f6ec0a3a5276af71d08a26199e0ccbf2556fc9"
},
"OwlViTForObjectDetection": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"OwlViTImageProcessor"
],
"model_classes": [
"OwlViTForObjectDetection"
],
"sha": "af958c9164f23d0f12921a8edf687f9aaa6af90e"
},
"OwlViTModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"OwlViTImageProcessor"
],
"model_classes": [
"OwlViTModel"
],
"sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5"
},
"Owlv2ForObjectDetection": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"Owlv2ImageProcessor"
],
"model_classes": [
"Owlv2ForObjectDetection"
],
"sha": "30439c0b2749726468dc13a755261e8101170052"
},
"Owlv2Model": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"Owlv2ImageProcessor"
],
"model_classes": [
"Owlv2Model"
],
"sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377"
},
"PLBartForCausalLM": {
"tokenizer_classes": [
"PLBartTokenizer"
],
"processor_classes": [],
"model_classes": [
"PLBartForCausalLM"
],
"sha": "6ee51133246dbdb18fc3681ebd62d21e421b9bb4"
},
"PLBartForConditionalGeneration": {
"tokenizer_classes": [
"PLBartTokenizer"
],
"processor_classes": [],
"model_classes": [
"PLBartForConditionalGeneration"
],
"sha": "ba191d28f4678d20b4dfed5fca5944018282cf20"
},
"PLBartForSequenceClassification": {
"tokenizer_classes": [
"PLBartTokenizer"
],
"processor_classes": [],
"model_classes": [
"PLBartForSequenceClassification"
],
"sha": "02063b3d9707fcff619a4e37a0d6e58f76e39b18"
},
"PLBartModel": {
"tokenizer_classes": [
"PLBartTokenizer"
],
"processor_classes": [],
"model_classes": [
"PLBartModel"
],
"sha": "cfbba29169b3f40d800403fc1b53982e1f88c5f8"
},
"PegasusForCausalLM": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PegasusForCausalLM"
],
"sha": "6e685a698302a3ba33e5379d3a37eb0bc1ae2f70"
},
"PegasusForConditionalGeneration": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PegasusForConditionalGeneration",
"TFPegasusForConditionalGeneration"
],
"sha": "15e58ee2ebc14b6e80ef2891259057ee5f049be2"
},
"PegasusModel": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PegasusModel",
"TFPegasusModel"
],
"sha": "fa36b24523db411ef77903453346b8be81ef73fe"
},
"PegasusXForConditionalGeneration": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PegasusXForConditionalGeneration"
],
"sha": "7588a8120f26a36c1687c14bdf1e9f9656891c1a"
},
"PegasusXModel": {
"tokenizer_classes": [
"PegasusTokenizer",
"PegasusTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PegasusXModel"
],
"sha": "a0bdff627416ac3c39c22d081f5d88d8b8fd99cc"
},
"PerceiverForImageClassificationConvProcessing": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverForImageClassificationConvProcessing"
],
"sha": "2c1e5e62ebc9d0c931adc8c665fb05bde6c1c1f1"
},
"PerceiverForImageClassificationFourier": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverForImageClassificationFourier"
],
"sha": "88da41b8851b76b8be0dacdb3de023db02bb031a"
},
"PerceiverForImageClassificationLearned": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverForImageClassificationLearned"
],
"sha": "879bd1fa38d3baddb027bb2cacba2d160a741375"
},
"PerceiverForMaskedLM": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverForMaskedLM"
],
"sha": "1d2459cbd281ef72da5682e65102aaca96183045"
},
"PerceiverForSequenceClassification": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverForSequenceClassification"
],
"sha": "576f1f96348f0343458499fbf53d4102b5c0f2ff"
},
"PerceiverModel": {
"tokenizer_classes": [
"PerceiverTokenizer"
],
"processor_classes": [
"PerceiverImageProcessor"
],
"model_classes": [
"PerceiverModel"
],
"sha": "83ec4d2d61ed62525ee033e13d144817beb29d19"
},
"PersimmonForCausalLM": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PersimmonForCausalLM"
],
"sha": "454234d6496c3857f5bf3eafb784616e2cd3ea82"
},
"PersimmonForSequenceClassification": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PersimmonForSequenceClassification"
],
"sha": "1d2674846543a181ca67bafa8b8f3a48bd2eefd1"
},
"PersimmonModel": {
"tokenizer_classes": [
"LlamaTokenizer",
"LlamaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PersimmonModel"
],
"sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859"
},
"PhiForCausalLM": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PhiForCausalLM"
],
"sha": "3fecc0109a4a3a230e3a5509eaf47a26eba85d79"
},
"PhiForSequenceClassification": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PhiForSequenceClassification"
],
"sha": "e1c9f8ebf1317516acc1cd6338de71a53e770245"
},
"PhiForTokenClassification": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PhiForTokenClassification"
],
"sha": "d3a8054903753b5c96c05eaf9877905a116a1d5e"
},
"PhiModel": {
"tokenizer_classes": [
"CodeGenTokenizer",
"CodeGenTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"PhiModel"
],
"sha": "99c38d5ce7ace35127d00ed3eeb3561308ea6b21"
},
"Pix2StructForConditionalGeneration": {
"tokenizer_classes": [
"T5TokenizerFast"
],
"processor_classes": [
"Pix2StructImageProcessor",
"Pix2StructProcessor"
],
"model_classes": [
"Pix2StructForConditionalGeneration"
],
"sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb"
},
"PoolFormerForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"PoolFormerImageProcessor"
],
"model_classes": [
"PoolFormerForImageClassification"
],
"sha": "ef04de5a6896100d457fb9553dd9789c09cca98e"
},
"PoolFormerModel": {
"tokenizer_classes": [],
"processor_classes": [
"PoolFormerImageProcessor"
],
"model_classes": [
"PoolFormerModel"
],
"sha": "e8037215ebdbf795329ef6525cdc6aa547f04ace"
},
"ProphetNetForCausalLM": {
"tokenizer_classes": [
"ProphetNetTokenizer"
],
"processor_classes": [],
"model_classes": [
"ProphetNetForCausalLM"
],
"sha": "d40b1e75bbc5ea0839563457aff6eee5bc0bb03e"
},
"ProphetNetForConditionalGeneration": {
"tokenizer_classes": [
"ProphetNetTokenizer"
],
"processor_classes": [],
"model_classes": [
"ProphetNetForConditionalGeneration"
],
"sha": "d842875c41278032af39c03c66902786bb5ff2c7"
},
"ProphetNetModel": {
"tokenizer_classes": [
"ProphetNetTokenizer"
],
"processor_classes": [],
"model_classes": [
"ProphetNetModel"
],
"sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937"
},
"PvtForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"PvtImageProcessor"
],
"model_classes": [
"PvtForImageClassification"
],
"sha": "589b37bd6941aff6dd248259f9eee3c422a41fde"
},
"PvtModel": {
"tokenizer_classes": [],
"processor_classes": [
"PvtImageProcessor"
],
"model_classes": [
"PvtModel"
],
"sha": "c40765c382515ae627652d60e9077b6478448d48"
},
"ReformerForMaskedLM": {
"tokenizer_classes": [
"ReformerTokenizer",
"ReformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ReformerForMaskedLM"
],
"sha": "1e6431e42c676b525e3215e9e3cc8f1404f9f82b"
},
"ReformerForQuestionAnswering": {
"tokenizer_classes": [
"ReformerTokenizer",
"ReformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ReformerForQuestionAnswering"
],
"sha": "62b43977f244474bd6982c6327d0c57310258fcd"
},
"ReformerForSequenceClassification": {
"tokenizer_classes": [
"ReformerTokenizer",
"ReformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ReformerForSequenceClassification"
],
"sha": "67bd534a990a7dcfa02406987e7f066caa2a30e8"
},
"ReformerModel": {
"tokenizer_classes": [
"ReformerTokenizer",
"ReformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"ReformerModel"
],
"sha": "a34ddb1389067448e9bc1323de674951cfb4cff1"
},
"ReformerModelWithLMHead": {
"tokenizer_classes": [
"ReformerTokenizer",
"ReformerTokenizerFast"
],
"processor_classes": [],
"model_classes": [],
"sha": "e7a8addaea8407d4c55e144e48aee04be6cca618"
},
"RegNetForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"RegNetForImageClassification",
"TFRegNetForImageClassification"
],
"sha": "5ec67c84fc7944c0c5b386bd26820bc4d1f3b32a"
},
"RegNetModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"RegNetModel",
"TFRegNetModel"
],
"sha": "72375e1401dc8271d4abb6295c9cee376f7b8f1a"
},
"RemBertForCausalLM": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForCausalLM",
"TFRemBertForCausalLM"
],
"sha": "8d9ae3d74a0e0a8958b4ee8c9dca3632abf52ef9"
},
"RemBertForMaskedLM": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForMaskedLM",
"TFRemBertForMaskedLM"
],
"sha": "b7c27d01e1cc3bef9ddd6a78627d700b3bffd759"
},
"RemBertForMultipleChoice": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForMultipleChoice",
"TFRemBertForMultipleChoice"
],
"sha": "2fe192677b9740cf24dd559339d46925e8ac23d4"
},
"RemBertForQuestionAnswering": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForQuestionAnswering",
"TFRemBertForQuestionAnswering"
],
"sha": "22b8ba44681b96292a1cf7f6df4ba6bb7937ec6e"
},
"RemBertForSequenceClassification": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForSequenceClassification",
"TFRemBertForSequenceClassification"
],
"sha": "20f3e89341ea15266d2685a8798142fba03c3f98"
},
"RemBertForTokenClassification": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertForTokenClassification",
"TFRemBertForTokenClassification"
],
"sha": "15712ff753708da3cf0550e76e73a5d0bba7784e"
},
"RemBertModel": {
"tokenizer_classes": [
"RemBertTokenizer",
"RemBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RemBertModel",
"TFRemBertModel"
],
"sha": "59cc6d099b1ded0aaead8684457415b129f79e86"
},
"ResNetBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ResNetBackbone"
],
"sha": "c84a6bcf8af4b6a3403dea3cf4c55965ac39f239"
},
"ResNetForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ResNetForImageClassification",
"TFResNetForImageClassification"
],
"sha": "34a180ad24d80811d420d7aa4fbec4a17751aaf8"
},
"ResNetModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"ResNetModel",
"TFResNetModel"
],
"sha": "fafa6cdf9986c6cfbae360596b3574162430bcd3"
},
"RoCBertForCausalLM": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForCausalLM"
],
"sha": "194d8dafc4f4142f8d31e6b4be14b55d812f923b"
},
"RoCBertForMaskedLM": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForMaskedLM"
],
"sha": "8bc285f32f3b932dbd56ddf91b1170734d638eeb"
},
"RoCBertForMultipleChoice": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForMultipleChoice"
],
"sha": "bb54e5ae021d728022d34b12fee3f087d9486af9"
},
"RoCBertForPreTraining": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForPreTraining"
],
"sha": "86ebbd5b0bc84660ad7f505082eff19b86c137c8"
},
"RoCBertForQuestionAnswering": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForQuestionAnswering"
],
"sha": "1bfc2dc3d6e76170e6dca1ff32a54a0887ff28a3"
},
"RoCBertForSequenceClassification": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForSequenceClassification"
],
"sha": "c329038802241f454273894128fea38b60f7c739"
},
"RoCBertForTokenClassification": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertForTokenClassification"
],
"sha": "afe5ec22c2ad1d9ff6e3e64c87eb7555faaa936d"
},
"RoCBertModel": {
"tokenizer_classes": [
"RoCBertTokenizer"
],
"processor_classes": [],
"model_classes": [
"RoCBertModel"
],
"sha": "29de5580d5f5d3461a88673e7b4c492a9d8a67a4"
},
"RoFormerForCausalLM": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForCausalLM",
"TFRoFormerForCausalLM"
],
"sha": "6e074219c6dd8f8b221bbfda64fba100f729f88d"
},
"RoFormerForMaskedLM": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForMaskedLM",
"TFRoFormerForMaskedLM"
],
"sha": "a3a4d05f9b29601553a77244f2adcf8194f9367c"
},
"RoFormerForMultipleChoice": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForMultipleChoice",
"TFRoFormerForMultipleChoice"
],
"sha": "aca3999a1d14f09644faed44e2cdfb28ed68a3d3"
},
"RoFormerForQuestionAnswering": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForQuestionAnswering",
"TFRoFormerForQuestionAnswering"
],
"sha": "b8a20b3a788f178b9ef64e2eb9587f693dca1b69"
},
"RoFormerForSequenceClassification": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForSequenceClassification",
"TFRoFormerForSequenceClassification"
],
"sha": "d092e2d5e62012bf4ec921e763b37865d6189216"
},
"RoFormerForTokenClassification": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerForTokenClassification",
"TFRoFormerForTokenClassification"
],
"sha": "85d3a17062e1f3e0539abfe738a88203e25349b6"
},
"RoFormerModel": {
"tokenizer_classes": [
"RoFormerTokenizer",
"RoFormerTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RoFormerModel",
"TFRoFormerModel"
],
"sha": "22e7df2f4cd66caf449f2342f63d176005afccc9"
},
"RobertaForCausalLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForCausalLM",
"TFRobertaForCausalLM"
],
"sha": "5d1d24d56f9735402e50a2ea513ffde44487733e"
},
"RobertaForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForMaskedLM",
"TFRobertaForMaskedLM"
],
"sha": "b21c9daf0b3b66530bf5d45d67df5ec392b5059c"
},
"RobertaForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForMultipleChoice",
"TFRobertaForMultipleChoice"
],
"sha": "10020d9546d4d7318f4d514fe13daaad07e6269f"
},
"RobertaForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForQuestionAnswering",
"TFRobertaForQuestionAnswering"
],
"sha": "eea4a81306891746bac9e7715f805a2d9dbf4be7"
},
"RobertaForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForSequenceClassification",
"TFRobertaForSequenceClassification"
],
"sha": "6a6f53fc6ab98e29ed539e76b1cb76d25a2cd720"
},
"RobertaForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaForTokenClassification",
"TFRobertaForTokenClassification"
],
"sha": "9190044c4091eb0d98ae7638c453e24846bca5d7"
},
"RobertaModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaModel",
"TFRobertaModel"
],
"sha": "181a0b8a7ad24500ec327ad07ddb225f0680ac0a"
},
"RobertaPreLayerNormForCausalLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForCausalLM"
],
"sha": "73b6d4531b41f295a5d310d7aa44736004a59865"
},
"RobertaPreLayerNormForMaskedLM": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMaskedLM"
],
"sha": "a61723c77e5ab7adc95285e7823a0a49b99af395"
},
"RobertaPreLayerNormForMultipleChoice": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForMultipleChoice"
],
"sha": "3dcfa62e0771358c60232a18135bfe7c7f6d715e"
},
"RobertaPreLayerNormForQuestionAnswering": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForQuestionAnswering"
],
"sha": "a8e76a5a50f7df60055e5ed6a1c3af2e7d34cf01"
},
"RobertaPreLayerNormForSequenceClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForSequenceClassification"
],
"sha": "7509cb0286d146ef2fc6beb8867ae31b92fb1b16"
},
"RobertaPreLayerNormForTokenClassification": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormForTokenClassification"
],
"sha": "3ad5814ba126b41e18c1978c970e396fab6da9bf"
},
"RobertaPreLayerNormModel": {
"tokenizer_classes": [
"RobertaTokenizer",
"RobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RobertaPreLayerNormModel",
"TFRobertaPreLayerNormModel"
],
"sha": "4830db38fd310404c5ab70bd00684eca0bc06ca8"
},
"RwkvForCausalLM": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RwkvForCausalLM"
],
"sha": "2f452fd46b39e39b1a6a95fa1d8232405bbb3e96"
},
"RwkvModel": {
"tokenizer_classes": [
"GPTNeoXTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"RwkvModel"
],
"sha": "88a52c9437dc3c06f65a8252490be7eb91197804"
},
"SEWDForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWDForCTC"
],
"sha": "5c7495c77ae9e0f12c0de05d3a5fb95bdcd91768"
},
"SEWDForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWDForSequenceClassification"
],
"sha": "d6cbf1164ce1999fdaf3deeb7a6eba19a3b1f873"
},
"SEWDModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWDModel"
],
"sha": "dde4e02219449f149bb3403bbeae127cafaf9c79"
},
"SEWForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWForCTC"
],
"sha": "4477c7a277059fba08772acf91cf3e3dd3cb073b"
},
"SEWForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWForSequenceClassification"
],
"sha": "3b90fbb1c0c3848fed18f91a0169bb297a3e6619"
},
"SEWModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SEWModel"
],
"sha": "0a0fbb844eeefa0dce62bd05db30a2bb91e5dc88"
},
"SamModel": {
"tokenizer_classes": [],
"processor_classes": [
"SamImageProcessor"
],
"model_classes": [
"SamModel",
"TFSamModel"
],
"sha": "eca8651bc84e5ac3b1b62e784b744a6bd1b82575"
},
"SegformerForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"SegformerImageProcessor"
],
"model_classes": [
"SegformerForImageClassification",
"TFSegformerForImageClassification"
],
"sha": "c566ae0ed382be4ed61ed6dacffa2ba663e9cc19"
},
"SegformerForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"SegformerImageProcessor"
],
"model_classes": [
"SegformerForSemanticSegmentation",
"TFSegformerForSemanticSegmentation"
],
"sha": "b73798972cdf24daafa858994713aca60e2bf90d"
},
"SegformerModel": {
"tokenizer_classes": [],
"processor_classes": [
"SegformerImageProcessor"
],
"model_classes": [
"SegformerModel",
"TFSegformerModel"
],
"sha": "3d4ba8ed2bdf801e6afa855b9d77893f2b7f9e10"
},
"Speech2TextForConditionalGeneration": {
"tokenizer_classes": [
"Speech2TextTokenizer"
],
"processor_classes": [
"Speech2TextFeatureExtractor"
],
"model_classes": [
"Speech2TextForConditionalGeneration",
"TFSpeech2TextForConditionalGeneration"
],
"sha": "1da80293ec78762e136cf6dd64b652693f9ab364"
},
"Speech2TextModel": {
"tokenizer_classes": [
"Speech2TextTokenizer"
],
"processor_classes": [
"Speech2TextFeatureExtractor"
],
"model_classes": [
"Speech2TextModel",
"TFSpeech2TextModel"
],
"sha": "7c6e63bd0c15dd99ef01573d4c43f90e4920cc91"
},
"SpeechEncoderDecoderModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"SpeechEncoderDecoderModel"
],
"sha": "78602ae0857728e95de4042bdca8a31ef818890a"
},
"SpeechT5ForSpeechToText": {
"tokenizer_classes": [
"SpeechT5Tokenizer"
],
"processor_classes": [
"SpeechT5FeatureExtractor"
],
"model_classes": [
"SpeechT5ForSpeechToText"
],
"sha": "d46f0a83324e5865420a27a738ef203292de3479"
},
"SpeechT5ForTextToSpeech": {
"tokenizer_classes": [
"SpeechT5Tokenizer"
],
"processor_classes": [
"SpeechT5FeatureExtractor"
],
"model_classes": [
"SpeechT5ForTextToSpeech"
],
"sha": "922e748d9e1ea256a8d9259782021cd3820d5924"
},
"SpeechT5Model": {
"tokenizer_classes": [
"SpeechT5Tokenizer"
],
"processor_classes": [
"SpeechT5FeatureExtractor"
],
"model_classes": [
"SpeechT5Model"
],
"sha": "7b248f77ca88ffddcdb538e772f6de63a86a4f9b"
},
"SplinterForPreTraining": {
"tokenizer_classes": [
"SplinterTokenizer"
],
"processor_classes": [],
"model_classes": [
"SplinterForPreTraining"
],
"sha": "e8a94efa740f1d685fa553f49132c6f022de5389"
},
"SplinterForQuestionAnswering": {
"tokenizer_classes": [
"SplinterTokenizer"
],
"processor_classes": [],
"model_classes": [
"SplinterForQuestionAnswering"
],
"sha": "d038b7b683face4a361ab0f474d8a5b111c44c4d"
},
"SplinterModel": {
"tokenizer_classes": [
"SplinterTokenizer"
],
"processor_classes": [],
"model_classes": [
"SplinterModel"
],
"sha": "a35b13cbb7faba46dc265761bb839267eb53d248"
},
"SqueezeBertForMaskedLM": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertForMaskedLM"
],
"sha": "33ce239408c22d2c98be63c9ab4607ef9ceb6d49"
},
"SqueezeBertForMultipleChoice": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertForMultipleChoice"
],
"sha": "7e9e666896420c7839e27dcb280981d034ba4da5"
},
"SqueezeBertForQuestionAnswering": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertForQuestionAnswering"
],
"sha": "bceb045a9ac6eb2ded7d358ed577c6dc28ea487a"
},
"SqueezeBertForSequenceClassification": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertForSequenceClassification"
],
"sha": "c5aeb1f454a1d059d41a5f8dacaf784b9de0b899"
},
"SqueezeBertForTokenClassification": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertForTokenClassification"
],
"sha": "70ba60ca44a380e6aa983a37b163c57217219df7"
},
"SqueezeBertModel": {
"tokenizer_classes": [
"SqueezeBertTokenizer",
"SqueezeBertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SqueezeBertModel"
],
"sha": "e0a3ac56a4047da3f921638252ead5e44438bbdb"
},
"SwiftFormerForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwiftFormerForImageClassification"
],
"sha": "a249b14a525d29e675b6e4af4baacd9ba7df7598"
},
"SwiftFormerModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwiftFormerModel"
],
"sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d"
},
"Swin2SRForImageSuperResolution": {
"tokenizer_classes": [],
"processor_classes": [
"Swin2SRImageProcessor"
],
"model_classes": [
"Swin2SRForImageSuperResolution"
],
"sha": "3a2780de0b455084c018ac8a62b56040969e26ec"
},
"Swin2SRModel": {
"tokenizer_classes": [],
"processor_classes": [
"Swin2SRImageProcessor"
],
"model_classes": [
"Swin2SRModel"
],
"sha": "c67f6ecff9ef8675c3869c987277b0a1e040f4be"
},
"SwinBackbone": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwinBackbone"
],
"sha": "89b28b8ec05a7b3357be75a77eb7809e6fd5cfef"
},
"SwinForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwinForImageClassification",
"TFSwinForImageClassification"
],
"sha": "e3c2e80f380ef79781313981da1a993dd8b8d34d"
},
"SwinForMaskedImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwinForMaskedImageModeling",
"TFSwinForMaskedImageModeling"
],
"sha": "d84b061fbace1bc6e697e3253e222de42053f978"
},
"SwinModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"SwinModel",
"TFSwinModel"
],
"sha": "23ff641295660ec4fea399be8aa1bc14565961f8"
},
"Swinv2ForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"Swinv2ForImageClassification"
],
"sha": "3fd755cdf4cf611db83f72f9c9b00eb9257a38ca"
},
"Swinv2ForMaskedImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"Swinv2ForMaskedImageModeling"
],
"sha": "8375c31eb6231fde36ec6533a34ba5b28e296163"
},
"Swinv2Model": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"Swinv2Model"
],
"sha": "70aeb72e8a266f668c8b51a517ec01003b8d6804"
},
"SwitchTransformersForConditionalGeneration": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SwitchTransformersForConditionalGeneration"
],
"sha": "c8fcd2bb735894c78db7f1e5b51afc78aced7adb"
},
"SwitchTransformersModel": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"SwitchTransformersModel"
],
"sha": "275bbf6d389bfd0540b9f824c609c6b22a577328"
},
"T5EncoderModel": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"T5EncoderModel",
"TFT5EncoderModel"
],
"sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8"
},
"T5ForConditionalGeneration": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"T5ForConditionalGeneration",
"TFT5ForConditionalGeneration"
],
"sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29"
},
"T5ForQuestionAnswering": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"T5ForQuestionAnswering"
],
"sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94"
},
"T5ForSequenceClassification": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"T5ForSequenceClassification"
],
"sha": "105b5c4c8e1efe927444108f1388c4f102ebad15"
},
"T5Model": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"T5Model",
"TFT5Model"
],
"sha": "eb3d20dda0ba77c1de618d78116a1a0c784c515c"
},
"TableTransformerForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"DetrImageProcessor"
],
"model_classes": [
"TableTransformerForObjectDetection"
],
"sha": "9cf1e3f5c3555a727672a32b49f8b96c5aa20be6"
},
"TableTransformerModel": {
"tokenizer_classes": [],
"processor_classes": [
"DetrImageProcessor"
],
"model_classes": [
"TableTransformerModel"
],
"sha": "7b446244d8739b0c29d98f7d537b15ad578577d5"
},
"TapasForMaskedLM": {
"tokenizer_classes": [
"TapasTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTapasForMaskedLM",
"TapasForMaskedLM"
],
"sha": "2cedb92dd9a3dc37ffb7d35ad5190b110992577c"
},
"TapasForQuestionAnswering": {
"tokenizer_classes": [
"TapasTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTapasForQuestionAnswering",
"TapasForQuestionAnswering"
],
"sha": "4cc91b9e5db662e6e392d8052587ae419896d72b"
},
"TapasForSequenceClassification": {
"tokenizer_classes": [
"TapasTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTapasForSequenceClassification",
"TapasForSequenceClassification"
],
"sha": "7c37bfb87a6fce2f8604bb3cab2a14e09a285e14"
},
"TapasModel": {
"tokenizer_classes": [
"TapasTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTapasModel",
"TapasModel"
],
"sha": "bc004af0a415afe1f566c3afe8dd4d48d08c1ce0"
},
"TimesformerForVideoClassification": {
"tokenizer_classes": [],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"TimesformerForVideoClassification"
],
"sha": "0b3b8e314618d7af34fb44477745491b44bf556d"
},
"TimesformerModel": {
"tokenizer_classes": [],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"TimesformerModel"
],
"sha": "ea51f7ebb6426ad2b1fa1396e83f8e8ad5bc3b44"
},
"TransfoXLForSequenceClassification": {
"tokenizer_classes": [
"TransfoXLTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTransfoXLForSequenceClassification",
"TransfoXLForSequenceClassification"
],
"sha": "f3d370184350667d74056b979081b0bf5b0083c1"
},
"TransfoXLLMHeadModel": {
"tokenizer_classes": [
"TransfoXLTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTransfoXLLMHeadModel",
"TransfoXLLMHeadModel"
],
"sha": "e0d4cebcdde52d8d4c81782a1edc606830bd6afd"
},
"TransfoXLModel": {
"tokenizer_classes": [
"TransfoXLTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFTransfoXLModel",
"TransfoXLModel"
],
"sha": "6938eeae35662a862accb01412dfc486454bdc8f"
},
"TvltForPreTraining": {
"tokenizer_classes": [],
"processor_classes": [
"TvltProcessor"
],
"model_classes": [
"TvltForPreTraining"
],
"sha": "f7bd2833764eb6d55a921aaed81d3f21119016ae"
},
"TvltModel": {
"tokenizer_classes": [],
"processor_classes": [
"TvltProcessor"
],
"model_classes": [
"TvltModel"
],
"sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3"
},
"UMT5EncoderModel": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"UMT5EncoderModel"
],
"sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0"
},
"UMT5ForQuestionAnswering": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"UMT5ForQuestionAnswering"
],
"sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093"
},
"UMT5ForSequenceClassification": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"UMT5ForSequenceClassification"
],
"sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14"
},
"UMT5Model": {
"tokenizer_classes": [
"T5Tokenizer",
"T5TokenizerFast"
],
"processor_classes": [],
"model_classes": [
"UMT5Model"
],
"sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd"
},
"UniSpeechForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechForCTC"
],
"sha": "102b56d76f4d74cface309801c0ad80892583751"
},
"UniSpeechForPreTraining": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechForPreTraining"
],
"sha": "830be5b3e85aaae7bcc961218e417c29743d6042"
},
"UniSpeechForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechForSequenceClassification"
],
"sha": "a30ac1516944757ccd8efcbcf94033a03f8708bf"
},
"UniSpeechModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechModel"
],
"sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d"
},
"UniSpeechSatForAudioFrameClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatForAudioFrameClassification"
],
"sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57"
},
"UniSpeechSatForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatForCTC"
],
"sha": "a8617538d3a2ae990f022bb0c36b8428a4870822"
},
"UniSpeechSatForPreTraining": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatForPreTraining"
],
"sha": "a772f66db0ab49e1050e524d7fcbe5106ebdaf96"
},
"UniSpeechSatForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatForSequenceClassification"
],
"sha": "f1c16567bd829a6d8a7a2d167d22e9653149e625"
},
"UniSpeechSatForXVector": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatForXVector"
],
"sha": "71cb3780cf3678f74fba00e19df82df76dca6133"
},
"UniSpeechSatModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"UniSpeechSatModel"
],
"sha": "ea755bbc7c6c6aa649c58b4b000f243acbbd6b5a"
},
"UperNetForSemanticSegmentation": {
"tokenizer_classes": [],
"processor_classes": [
"SegformerImageProcessor"
],
"model_classes": [
"UperNetForSemanticSegmentation"
],
"sha": "f1871cb388bc0b203f5397bfc06a373736c2fb9c"
},
"VanForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"VanForImageClassification"
],
"sha": "694eb147bc4768aeabeffbfb97732281b71a621d"
},
"VanModel": {
"tokenizer_classes": [],
"processor_classes": [
"ConvNextImageProcessor"
],
"model_classes": [
"VanModel"
],
"sha": "d8ac60ce952020f2b0355fc566d634b2c5ba635d"
},
"ViTForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFViTForImageClassification",
"ViTForImageClassification"
],
"sha": "5b3b44a3ed492070c273e481e30ecf4deddc5ec3"
},
"ViTForMaskedImageModeling": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"ViTForMaskedImageModeling"
],
"sha": "d984e0b432fe195c2c26952d4f249031e7b1e2ea"
},
"ViTHybridForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTHybridImageProcessor"
],
"model_classes": [
"ViTHybridForImageClassification"
],
"sha": "69c7c396032ffe60d54953b584394899fb95ccc1"
},
"ViTHybridModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTHybridImageProcessor"
],
"model_classes": [
"ViTHybridModel"
],
"sha": "077443bfefe40d625314dbd274d2ff8089624797"
},
"ViTMAEForPreTraining": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFViTMAEForPreTraining",
"ViTMAEForPreTraining"
],
"sha": "2d98d80d9c45eef0d5b6f5426d7196bb546fe9fc"
},
"ViTMAEModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFViTMAEModel",
"ViTMAEModel"
],
"sha": "c7c2f12c19d2dbec08851a9dac7485909629a5fd"
},
"ViTMSNForImageClassification": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"ViTMSNForImageClassification"
],
"sha": "feda819aa7dbb55d850130f4cf1d210858d7eb89"
},
"ViTMSNModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"ViTMSNModel"
],
"sha": "0733abf168cb47a149821fdd2113d546e15c47de"
},
"ViTModel": {
"tokenizer_classes": [],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFViTModel",
"ViTModel"
],
"sha": "31817b7a64ebc3333fcd4801dfbb356ab07b13dd"
},
"VideoMAEForPreTraining": {
"tokenizer_classes": [],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"VideoMAEForPreTraining"
],
"sha": "9de66c4bb759dc7269a7af17bf70b3194550acaa"
},
"VideoMAEForVideoClassification": {
"tokenizer_classes": [],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"VideoMAEForVideoClassification"
],
"sha": "d3f743408386bc0ffe2d979de35335e87bc34aec"
},
"VideoMAEModel": {
"tokenizer_classes": [],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"VideoMAEModel"
],
"sha": "a2be96beba888817d92b67525601569d830342ff"
},
"ViltForQuestionAnswering": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"ViltImageProcessor"
],
"model_classes": [
"ViltForQuestionAnswering"
],
"sha": "faeffbf43da6621717d8b13e7ebe87d58d750cb2"
},
"ViltModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"ViltImageProcessor"
],
"model_classes": [
"ViltModel"
],
"sha": "3a89b7b5782947c4f4125162ffe1c9cc18c9c800"
},
"VisionEncoderDecoderModel": {
"tokenizer_classes": [
"GPT2Tokenizer",
"GPT2TokenizerFast"
],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFVisionEncoderDecoderModel",
"VisionEncoderDecoderModel"
],
"sha": "23917761070cf16b26a6d033b6bff9100bbc618b"
},
"VisionTextDualEncoderModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [
"ViTImageProcessor"
],
"model_classes": [
"TFVisionTextDualEncoderModel",
"VisionTextDualEncoderModel"
],
"sha": "c3569ef17f66acbacb76f7ceb6f71e02d075dd6c"
},
"VisualBertForPreTraining": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"VisualBertForPreTraining"
],
"sha": "ce5a4d93ce762971cd216cda9aef8b9ce3f0450b"
},
"VisualBertModel": {
"tokenizer_classes": [
"BertTokenizer",
"BertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"VisualBertModel"
],
"sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa"
},
"VitsModel": {
"tokenizer_classes": [
"VitsTokenizer"
],
"processor_classes": [],
"model_classes": [
"VitsModel"
],
"sha": "b9a20ca5b6a7874576e485850260578895587dd2"
},
"Wav2Vec2ConformerForAudioFrameClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerForAudioFrameClassification"
],
"sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56"
},
"Wav2Vec2ConformerForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerForCTC"
],
"sha": "a2ecb2985fcbb9f3ed000c12c1af6da36f5eaa3a"
},
"Wav2Vec2ConformerForPreTraining": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerForPreTraining"
],
"sha": "099279b69e5da19efb05589804ccee210a0e57ae"
},
"Wav2Vec2ConformerForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerForSequenceClassification"
],
"sha": "e8c1bca543c54bf15a6c026cb3761993b52cf617"
},
"Wav2Vec2ConformerForXVector": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerForXVector"
],
"sha": "ba206a55998f16e134960728bd02006eaf39114f"
},
"Wav2Vec2ConformerModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ConformerModel"
],
"sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994"
},
"Wav2Vec2ForAudioFrameClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ForAudioFrameClassification"
],
"sha": "ab219f119e10f56e1059966c66d23f0df3c2c343"
},
"Wav2Vec2ForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ForCTC"
],
"sha": "6245fbb1cb99cea5c4de1e73f81fba978fb275ac"
},
"Wav2Vec2ForMaskedLM": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ForMaskedLM"
],
"sha": "e083cf4fefec4df3c241dbbe5e17a84a794a89bd"
},
"Wav2Vec2ForPreTraining": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ForPreTraining"
],
"sha": "a8d71e216334260353ccbf5ce84cd6924f7457da"
},
"Wav2Vec2ForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"TFWav2Vec2ForSequenceClassification",
"Wav2Vec2ForSequenceClassification"
],
"sha": "2000b2022abcc37100241485f5872126b70164c9"
},
"Wav2Vec2ForXVector": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"Wav2Vec2ForXVector"
],
"sha": "f4c422db53aae061ea609f4407af7cd5b33c8942"
},
"Wav2Vec2Model": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"TFWav2Vec2Model",
"Wav2Vec2Model"
],
"sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37"
},
"WavLMForAudioFrameClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"WavLMForAudioFrameClassification"
],
"sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec"
},
"WavLMForCTC": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"WavLMForCTC"
],
"sha": "f1139c5ddf34d2327ae1f6917edd7da180b06971"
},
"WavLMForSequenceClassification": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"WavLMForSequenceClassification"
],
"sha": "4ba5f2019b46866ce2011c993194ebda60afc028"
},
"WavLMForXVector": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"WavLMForXVector"
],
"sha": "faf9264eac56a56d5510a0984d7e1146e4c8cf62"
},
"WavLMModel": {
"tokenizer_classes": [
"Wav2Vec2CTCTokenizer"
],
"processor_classes": [
"Wav2Vec2FeatureExtractor"
],
"model_classes": [
"WavLMModel"
],
"sha": "e932275e37cb643be271f655bd1d649f4f4b4bd5"
},
"WhisperForAudioClassification": {
"tokenizer_classes": [
"WhisperTokenizer"
],
"processor_classes": [
"WhisperFeatureExtractor"
],
"model_classes": [
"WhisperForAudioClassification"
],
"sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d"
},
"WhisperForCausalLM": {
"tokenizer_classes": [
"WhisperTokenizer"
],
"processor_classes": [
"WhisperFeatureExtractor"
],
"model_classes": [
"WhisperForCausalLM"
],
"sha": "e7febfd7f4512e029293c677e6d2633e23fc459a"
},
"WhisperForConditionalGeneration": {
"tokenizer_classes": [
"WhisperTokenizer",
"WhisperTokenizerFast"
],
"processor_classes": [
"WhisperFeatureExtractor"
],
"model_classes": [
"TFWhisperForConditionalGeneration",
"WhisperForConditionalGeneration"
],
"sha": "598101b885b24508042d9292e54aa04bff96318e"
},
"WhisperModel": {
"tokenizer_classes": [
"WhisperTokenizer",
"WhisperTokenizerFast"
],
"processor_classes": [
"WhisperFeatureExtractor"
],
"model_classes": [
"TFWhisperModel",
"WhisperModel"
],
"sha": "c04c50216bb6b0a8f4d55f2fa9f9f4cf61c8a77c"
},
"XCLIPModel": {
"tokenizer_classes": [
"CLIPTokenizer",
"CLIPTokenizerFast"
],
"processor_classes": [
"VideoMAEImageProcessor"
],
"model_classes": [
"XCLIPModel"
],
"sha": "299ffffc6b94c3558bf7dbc38e24074c99490046"
},
"XGLMForCausalLM": {
"tokenizer_classes": [
"XGLMTokenizer",
"XGLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXGLMForCausalLM",
"XGLMForCausalLM"
],
"sha": "d5381ce297c249d559937c6bb6316cf1fdad2613"
},
"XGLMModel": {
"tokenizer_classes": [
"XGLMTokenizer",
"XGLMTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXGLMModel",
"XGLMModel"
],
"sha": "2b5cef167822cfaa558d259af1722e2f785cd3d5"
},
"XLMForMultipleChoice": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMForMultipleChoice",
"XLMForMultipleChoice"
],
"sha": "f0c8cc6462449ac9eb9b4158e433bd3c923db3af"
},
"XLMForQuestionAnsweringSimple": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMForQuestionAnsweringSimple",
"XLMForQuestionAnsweringSimple"
],
"sha": "82e93a2653cf3646eaaf02d8cc5f8ff9a4551523"
},
"XLMForSequenceClassification": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMForSequenceClassification",
"XLMForSequenceClassification"
],
"sha": "2d6892f5f703be9b481bca91477032bd0e36dbe5"
},
"XLMForTokenClassification": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMForTokenClassification",
"XLMForTokenClassification"
],
"sha": "9a591395e7a0643a03f5d2debb98caa3966e021c"
},
"XLMModel": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMModel",
"XLMModel"
],
"sha": "022b86df246414ff712475d9ca55db690ff1d3bf"
},
"XLMRobertaXLForCausalLM": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForCausalLM"
],
"sha": "fc05408e5b33a31638476ef337719dfbb7615ef3"
},
"XLMRobertaXLForMaskedLM": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForMaskedLM"
],
"sha": "e96f198eede757e5ae2c87632fdcfb341073ef6e"
},
"XLMRobertaXLForMultipleChoice": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForMultipleChoice"
],
"sha": "52732625f1bfbbb7cb4ba1cf0963de596d81822d"
},
"XLMRobertaXLForQuestionAnswering": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForQuestionAnswering"
],
"sha": "da388fdd2d28e0757eb0c2b2c612a8ff03af2223"
},
"XLMRobertaXLForSequenceClassification": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForSequenceClassification"
],
"sha": "980721187633bcf21ac0b8edbed933527f4611df"
},
"XLMRobertaXLForTokenClassification": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLForTokenClassification"
],
"sha": "37a97280faf6fef0bd946d3934d77a1b60fbf473"
},
"XLMRobertaXLModel": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XLMRobertaXLModel"
],
"sha": "8fbeb39a984912e47f5d24a31be61639031a0fc3"
},
"XLMWithLMHeadModel": {
"tokenizer_classes": [
"XLMTokenizer"
],
"processor_classes": [],
"model_classes": [
"TFXLMWithLMHeadModel",
"XLMWithLMHeadModel"
],
"sha": "db70bdefbaf095e88b8097e4b601d9105a511afa"
},
"XLNetForMultipleChoice": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetForMultipleChoice",
"XLNetForMultipleChoice"
],
"sha": "8bb7e28d0cd1e93154d3232baf5e9c79acaf9f1a"
},
"XLNetForQuestionAnsweringSimple": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetForQuestionAnsweringSimple",
"XLNetForQuestionAnsweringSimple"
],
"sha": "fabd06a45d947f3d46f1b8dce2186cf3b27776dc"
},
"XLNetForSequenceClassification": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetForSequenceClassification",
"XLNetForSequenceClassification"
],
"sha": "e3c194f24537ebf2c474ade60becb9397696edec"
},
"XLNetForTokenClassification": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetForTokenClassification",
"XLNetForTokenClassification"
],
"sha": "16aa15029aa667046d504c4a88ceddfdd5b5fb40"
},
"XLNetLMHeadModel": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetLMHeadModel",
"XLNetLMHeadModel"
],
"sha": "c9a98cc982a16ca162832a8cbea25116479bb938"
},
"XLNetModel": {
"tokenizer_classes": [
"XLNetTokenizer",
"XLNetTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"TFXLNetModel",
"XLNetModel"
],
"sha": "1d6e231942135faf32b8d9a97773d8f6c85ca561"
},
"XmodForCausalLM": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForCausalLM"
],
"sha": "c6b746071f2f067099a8fb4f57ce3c27a7e4b67d"
},
"XmodForMaskedLM": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForMaskedLM"
],
"sha": "e1085818f4ed3c6073b2038635e5f3061208923d"
},
"XmodForMultipleChoice": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForMultipleChoice"
],
"sha": "c63042cdf196be3fed846421b345d439b2483f69"
},
"XmodForQuestionAnswering": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForQuestionAnswering"
],
"sha": "75acd3071fae9978c82618cd0f090c87aabc1f23"
},
"XmodForSequenceClassification": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForSequenceClassification"
],
"sha": "523a16570be048618913ac17ccd00d343bcb5e99"
},
"XmodForTokenClassification": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodForTokenClassification"
],
"sha": "a0f0a02732b4579670dad11a69ae244ebd777b49"
},
"XmodModel": {
"tokenizer_classes": [
"XLMRobertaTokenizer",
"XLMRobertaTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"XmodModel"
],
"sha": "bc286de0035450e7dcd6bcce78098a967b9c2b6c"
},
"YolosForObjectDetection": {
"tokenizer_classes": [],
"processor_classes": [
"YolosImageProcessor"
],
"model_classes": [
"YolosForObjectDetection"
],
"sha": "0a4aae25bfbe8b5edd4815cb00d697a6ba7d2126"
},
"YolosModel": {
"tokenizer_classes": [],
"processor_classes": [
"YolosImageProcessor"
],
"model_classes": [
"YolosModel"
],
"sha": "339bc51f1914f031a550e5f95095ed4a4c22a7de"
},
"YosoForMaskedLM": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoForMaskedLM"
],
"sha": "cb291bedcbec199ea195f086e3ebea6fab026bba"
},
"YosoForMultipleChoice": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoForMultipleChoice"
],
"sha": "cf2d3a3f0628bc9d0da68ea8de26b12016453fee"
},
"YosoForQuestionAnswering": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoForQuestionAnswering"
],
"sha": "e8c3091f674588adfa3371b3de0427a9b39dd03f"
},
"YosoForSequenceClassification": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoForSequenceClassification"
],
"sha": "88132cbaa1a9a87f65b6f9813c388011377f18cf"
},
"YosoForTokenClassification": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoForTokenClassification"
],
"sha": "fd2219856608d3dba70dc7b1a06af629903dec31"
},
"YosoModel": {
"tokenizer_classes": [
"AlbertTokenizerFast"
],
"processor_classes": [],
"model_classes": [
"YosoModel"
],
"sha": "e144d9f1fe39c21eda1177702640e126892605ce"
}
} | 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_add_new_model_like.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers.commands.add_new_model_like import (
ModelPatterns,
_re_class_func,
add_content_to_file,
add_content_to_text,
clean_frameworks_in_init,
duplicate_doc_file,
duplicate_module,
filter_framework_files,
find_base_model_checkpoint,
get_model_files,
get_module_from_file,
parse_module_content,
replace_model_patterns,
retrieve_info_for_model,
retrieve_model_classes,
simplify_replacements,
)
from transformers.testing_utils import require_flax, require_tf, require_torch
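# Expected source files for a few reference models; the get_model_files tests below compare against these sets.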
BERT_MODEL_FILES = {
"src/transformers/models/bert/__init__.py",
"src/transformers/models/bert/configuration_bert.py",
"src/transformers/models/bert/tokenization_bert.py",
"src/transformers/models/bert/tokenization_bert_fast.py",
"src/transformers/models/bert/tokenization_bert_tf.py",
"src/transformers/models/bert/modeling_bert.py",
"src/transformers/models/bert/modeling_flax_bert.py",
"src/transformers/models/bert/modeling_tf_bert.py",
"src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py",
"src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py",
"src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py",
"src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py",
}
VIT_MODEL_FILES = {
"src/transformers/models/vit/__init__.py",
"src/transformers/models/vit/configuration_vit.py",
"src/transformers/models/vit/convert_dino_to_pytorch.py",
"src/transformers/models/vit/convert_vit_timm_to_pytorch.py",
"src/transformers/models/vit/feature_extraction_vit.py",
"src/transformers/models/vit/image_processing_vit.py",
"src/transformers/models/vit/modeling_vit.py",
"src/transformers/models/vit/modeling_tf_vit.py",
"src/transformers/models/vit/modeling_flax_vit.py",
}
WAV2VEC2_MODEL_FILES = {
"src/transformers/models/wav2vec2/__init__.py",
"src/transformers/models/wav2vec2/configuration_wav2vec2.py",
"src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py",
"src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py",
"src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py",
"src/transformers/models/wav2vec2/modeling_wav2vec2.py",
"src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py",
"src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py",
"src/transformers/models/wav2vec2/processing_wav2vec2.py",
"src/transformers/models/wav2vec2/tokenization_wav2vec2.py",
}
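# Root of the repository checkout, resolved from the installed package path (assumes an editable install from source).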
REPO_PATH = Path(transformers.__path__[0]).parent.parent
@require_torch
@require_tf
@require_flax
class TestAddNewModelLike(unittest.TestCase):
def init_file(self, file_name, content):
with open(file_name, "w", encoding="utf-8") as f:
f.write(content)
def check_result(self, file_name, expected_result):
with open(file_name, "r", encoding="utf-8") as f:
result = f.read()
self.assertEqual(result, expected_result)
def test_re_class_func(self):
self.assertEqual(_re_class_func.search("def my_function(x, y):").groups()[0], "my_function")
self.assertEqual(_re_class_func.search("class MyClass:").groups()[0], "MyClass")
self.assertEqual(_re_class_func.search("class MyClass(SuperClass):").groups()[0], "MyClass")
def test_model_patterns_defaults(self):
model_patterns = ModelPatterns("GPT-New new", "huggingface/gpt-new-base")
self.assertEqual(model_patterns.model_type, "gpt-new-new")
self.assertEqual(model_patterns.model_lower_cased, "gpt_new_new")
self.assertEqual(model_patterns.model_camel_cased, "GPTNewNew")
self.assertEqual(model_patterns.model_upper_cased, "GPT_NEW_NEW")
self.assertEqual(model_patterns.config_class, "GPTNewNewConfig")
self.assertIsNone(model_patterns.tokenizer_class)
self.assertIsNone(model_patterns.feature_extractor_class)
self.assertIsNone(model_patterns.processor_class)
def test_parse_module_content(self):
test_code = """SOME_CONSTANT = a constant
CONSTANT_DEFINED_ON_SEVERAL_LINES = [
first_item,
second_item
]
def function(args):
some code
# Copied from transformers.some_module
class SomeClass:
some code
"""
expected_parts = [
"SOME_CONSTANT = a constant\n",
"CONSTANT_DEFINED_ON_SEVERAL_LINES = [\n first_item,\n second_item\n]",
"",
"def function(args):\n some code\n",
"# Copied from transformers.some_module\nclass SomeClass:\n some code\n",
]
self.assertEqual(parse_module_content(test_code), expected_parts)
def test_add_content_to_text(self):
test_text = """all_configs = {
"gpt": "GPTConfig",
"bert": "BertConfig",
"t5": "T5Config",
}"""
expected = """all_configs = {
"gpt": "GPTConfig",
"gpt2": "GPT2Config",
"bert": "BertConfig",
"t5": "T5Config",
}"""
line = ' "gpt2": "GPT2Config",'
self.assertEqual(add_content_to_text(test_text, line, add_before="bert"), expected)
self.assertEqual(add_content_to_text(test_text, line, add_before="bert", exact_match=True), test_text)
self.assertEqual(
add_content_to_text(test_text, line, add_before=' "bert": "BertConfig",', exact_match=True), expected
)
self.assertEqual(add_content_to_text(test_text, line, add_before=re.compile(r'^\s*"bert":')), expected)
self.assertEqual(add_content_to_text(test_text, line, add_after="gpt"), expected)
self.assertEqual(add_content_to_text(test_text, line, add_after="gpt", exact_match=True), test_text)
self.assertEqual(
add_content_to_text(test_text, line, add_after=' "gpt": "GPTConfig",', exact_match=True), expected
)
self.assertEqual(add_content_to_text(test_text, line, add_after=re.compile(r'^\s*"gpt":')), expected)
def test_add_content_to_file(self):
test_text = """all_configs = {
"gpt": "GPTConfig",
"bert": "BertConfig",
"t5": "T5Config",
}"""
expected = """all_configs = {
"gpt": "GPTConfig",
"gpt2": "GPT2Config",
"bert": "BertConfig",
"t5": "T5Config",
}"""
line = ' "gpt2": "GPT2Config",'
with tempfile.TemporaryDirectory() as tmp_dir:
file_name = os.path.join(tmp_dir, "code.py")
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_before="bert")
self.check_result(file_name, expected)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_before="bert", exact_match=True)
self.check_result(file_name, test_text)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_before=' "bert": "BertConfig",', exact_match=True)
self.check_result(file_name, expected)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_before=re.compile(r'^\s*"bert":'))
self.check_result(file_name, expected)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_after="gpt")
self.check_result(file_name, expected)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_after="gpt", exact_match=True)
self.check_result(file_name, test_text)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_after=' "gpt": "GPTConfig",', exact_match=True)
self.check_result(file_name, expected)
self.init_file(file_name, test_text)
add_content_to_file(file_name, line, add_after=re.compile(r'^\s*"gpt":'))
self.check_result(file_name, expected)
def test_simplify_replacements(self):
self.assertEqual(simplify_replacements([("Bert", "NewBert")]), [("Bert", "NewBert")])
self.assertEqual(
simplify_replacements([("Bert", "NewBert"), ("bert", "new-bert")]),
[("Bert", "NewBert"), ("bert", "new-bert")],
)
self.assertEqual(
simplify_replacements([("BertConfig", "NewBertConfig"), ("Bert", "NewBert"), ("bert", "new-bert")]),
[("Bert", "NewBert"), ("bert", "new-bert")],
)
def test_replace_model_patterns(self):
bert_model_patterns = ModelPatterns("Bert", "bert-base-cased")
new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base")
bert_test = '''class TFBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
is_parallelizable = True
supports_gradient_checkpointing = True
model_type = "bert"
BERT_CONSTANT = "value"
'''
bert_expected = '''class TFNewBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = NewBertConfig
load_tf_weights = load_tf_weights_in_new_bert
base_model_prefix = "new_bert"
is_parallelizable = True
supports_gradient_checkpointing = True
model_type = "new-bert"
NEW_BERT_CONSTANT = "value"
'''
bert_converted, replacements = replace_model_patterns(bert_test, bert_model_patterns, new_bert_model_patterns)
self.assertEqual(bert_converted, bert_expected)
        # Replacements are empty here since bert has been replaced by bert_new in some instances and bert-new
        # in others.
self.assertEqual(replacements, "")
# If we remove the model type, we will get replacements
bert_test = bert_test.replace(' model_type = "bert"\n', "")
bert_expected = bert_expected.replace(' model_type = "new-bert"\n', "")
bert_converted, replacements = replace_model_patterns(bert_test, bert_model_patterns, new_bert_model_patterns)
self.assertEqual(bert_converted, bert_expected)
self.assertEqual(replacements, "BERT->NEW_BERT,Bert->NewBert,bert->new_bert")
gpt_model_patterns = ModelPatterns("GPT2", "gpt2")
new_gpt_model_patterns = ModelPatterns("GPT-New new", "huggingface/gpt-new-base")
gpt_test = '''class GPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
GPT2_CONSTANT = "value"
'''
gpt_expected = '''class GPTNewNewPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTNewNewConfig
load_tf_weights = load_tf_weights_in_gpt_new_new
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
GPT_NEW_NEW_CONSTANT = "value"
'''
gpt_converted, replacements = replace_model_patterns(gpt_test, gpt_model_patterns, new_gpt_model_patterns)
self.assertEqual(gpt_converted, gpt_expected)
        # Replacements are empty here since GPT2 has been replaced by GPTNewNew in some instances and GPT_NEW_NEW
        # in others.
self.assertEqual(replacements, "")
roberta_model_patterns = ModelPatterns("RoBERTa", "roberta-base", model_camel_cased="Roberta")
new_roberta_model_patterns = ModelPatterns(
"RoBERTa-New", "huggingface/roberta-new-base", model_camel_cased="RobertaNew"
)
roberta_test = '''# Copied from transformers.models.bert.BertModel with Bert->Roberta
class RobertaModel(RobertaPreTrainedModel):
""" The base RoBERTa model. """
checkpoint = roberta-base
base_model_prefix = "roberta"
'''
roberta_expected = '''# Copied from transformers.models.bert.BertModel with Bert->RobertaNew
class RobertaNewModel(RobertaNewPreTrainedModel):
""" The base RoBERTa-New model. """
checkpoint = huggingface/roberta-new-base
base_model_prefix = "roberta_new"
'''
roberta_converted, replacements = replace_model_patterns(
roberta_test, roberta_model_patterns, new_roberta_model_patterns
)
self.assertEqual(roberta_converted, roberta_expected)
def test_get_module_from_file(self):
self.assertEqual(
get_module_from_file("/git/transformers/src/transformers/models/bert/modeling_tf_bert.py"),
"transformers.models.bert.modeling_tf_bert",
)
self.assertEqual(
get_module_from_file("/transformers/models/gpt2/modeling_gpt2.py"),
"transformers.models.gpt2.modeling_gpt2",
)
with self.assertRaises(ValueError):
get_module_from_file("/models/gpt2/modeling_gpt2.py")
def test_duplicate_module(self):
bert_model_patterns = ModelPatterns("Bert", "bert-base-cased")
new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base")
bert_test = '''class TFBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
is_parallelizable = True
supports_gradient_checkpointing = True
BERT_CONSTANT = "value"
'''
bert_expected = '''class TFNewBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = NewBertConfig
load_tf_weights = load_tf_weights_in_new_bert
base_model_prefix = "new_bert"
is_parallelizable = True
supports_gradient_checkpointing = True
NEW_BERT_CONSTANT = "value"
'''
bert_expected_with_copied_from = (
"# Copied from transformers.bert_module.TFBertPreTrainedModel with Bert->NewBert,bert->new_bert\n"
+ bert_expected
)
with tempfile.TemporaryDirectory() as tmp_dir:
work_dir = os.path.join(tmp_dir, "transformers")
os.makedirs(work_dir)
file_name = os.path.join(work_dir, "bert_module.py")
dest_file_name = os.path.join(work_dir, "new_bert_module.py")
self.init_file(file_name, bert_test)
duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns)
self.check_result(dest_file_name, bert_expected_with_copied_from)
self.init_file(file_name, bert_test)
duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns, add_copied_from=False)
self.check_result(dest_file_name, bert_expected)
def test_duplicate_module_with_copied_from(self):
bert_model_patterns = ModelPatterns("Bert", "bert-base-cased")
new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base")
bert_test = '''# Copied from transformers.models.xxx.XxxModel with Xxx->Bert
class TFBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
is_parallelizable = True
supports_gradient_checkpointing = True
BERT_CONSTANT = "value"
'''
bert_expected = '''# Copied from transformers.models.xxx.XxxModel with Xxx->NewBert
class TFNewBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = NewBertConfig
load_tf_weights = load_tf_weights_in_new_bert
base_model_prefix = "new_bert"
is_parallelizable = True
supports_gradient_checkpointing = True
NEW_BERT_CONSTANT = "value"
'''
with tempfile.TemporaryDirectory() as tmp_dir:
work_dir = os.path.join(tmp_dir, "transformers")
os.makedirs(work_dir)
file_name = os.path.join(work_dir, "bert_module.py")
dest_file_name = os.path.join(work_dir, "new_bert_module.py")
self.init_file(file_name, bert_test)
duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns)
            # There should not be a new Copied from statement; the old one should be adapted.
self.check_result(dest_file_name, bert_expected)
self.init_file(file_name, bert_test)
duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns, add_copied_from=False)
self.check_result(dest_file_name, bert_expected)
def test_filter_framework_files(self):
files = ["modeling_bert.py", "modeling_tf_bert.py", "modeling_flax_bert.py", "configuration_bert.py"]
self.assertEqual(filter_framework_files(files), files)
self.assertEqual(set(filter_framework_files(files, ["pt", "tf", "flax"])), set(files))
self.assertEqual(set(filter_framework_files(files, ["pt"])), {"modeling_bert.py", "configuration_bert.py"})
self.assertEqual(set(filter_framework_files(files, ["tf"])), {"modeling_tf_bert.py", "configuration_bert.py"})
self.assertEqual(
set(filter_framework_files(files, ["flax"])), {"modeling_flax_bert.py", "configuration_bert.py"}
)
self.assertEqual(
set(filter_framework_files(files, ["pt", "tf"])),
{"modeling_tf_bert.py", "modeling_bert.py", "configuration_bert.py"},
)
self.assertEqual(
set(filter_framework_files(files, ["tf", "flax"])),
{"modeling_tf_bert.py", "modeling_flax_bert.py", "configuration_bert.py"},
)
self.assertEqual(
set(filter_framework_files(files, ["pt", "flax"])),
{"modeling_bert.py", "modeling_flax_bert.py", "configuration_bert.py"},
)
def test_get_model_files(self):
# BERT
bert_files = get_model_files("bert")
doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]}
self.assertEqual(model_files, BERT_MODEL_FILES)
self.assertEqual(bert_files["module_name"], "bert")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]}
bert_test_files = {
"tests/models/bert/test_tokenization_bert.py",
"tests/models/bert/test_modeling_bert.py",
"tests/models/bert/test_modeling_tf_bert.py",
"tests/models/bert/test_modeling_flax_bert.py",
}
self.assertEqual(test_files, bert_test_files)
# VIT
vit_files = get_model_files("vit")
doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]}
self.assertEqual(model_files, VIT_MODEL_FILES)
self.assertEqual(vit_files["module_name"], "vit")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]}
vit_test_files = {
"tests/models/vit/test_image_processing_vit.py",
"tests/models/vit/test_modeling_vit.py",
"tests/models/vit/test_modeling_tf_vit.py",
"tests/models/vit/test_modeling_flax_vit.py",
}
self.assertEqual(test_files, vit_test_files)
# Wav2Vec2
wav2vec2_files = get_model_files("wav2vec2")
doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]}
self.assertEqual(model_files, WAV2VEC2_MODEL_FILES)
self.assertEqual(wav2vec2_files["module_name"], "wav2vec2")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]}
wav2vec2_test_files = {
"tests/models/wav2vec2/test_feature_extraction_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_tf_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_flax_wav2vec2.py",
"tests/models/wav2vec2/test_processor_wav2vec2.py",
"tests/models/wav2vec2/test_tokenization_wav2vec2.py",
}
self.assertEqual(test_files, wav2vec2_test_files)
def test_get_model_files_only_pt(self):
# BERT
bert_files = get_model_files("bert", frameworks=["pt"])
doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]}
bert_model_files = BERT_MODEL_FILES - {
"src/transformers/models/bert/modeling_tf_bert.py",
"src/transformers/models/bert/modeling_flax_bert.py",
}
self.assertEqual(model_files, bert_model_files)
self.assertEqual(bert_files["module_name"], "bert")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]}
bert_test_files = {
"tests/models/bert/test_tokenization_bert.py",
"tests/models/bert/test_modeling_bert.py",
}
self.assertEqual(test_files, bert_test_files)
# VIT
vit_files = get_model_files("vit", frameworks=["pt"])
doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]}
vit_model_files = VIT_MODEL_FILES - {
"src/transformers/models/vit/modeling_tf_vit.py",
"src/transformers/models/vit/modeling_flax_vit.py",
}
self.assertEqual(model_files, vit_model_files)
self.assertEqual(vit_files["module_name"], "vit")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]}
vit_test_files = {
"tests/models/vit/test_image_processing_vit.py",
"tests/models/vit/test_modeling_vit.py",
}
self.assertEqual(test_files, vit_test_files)
# Wav2Vec2
wav2vec2_files = get_model_files("wav2vec2", frameworks=["pt"])
doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]}
wav2vec2_model_files = WAV2VEC2_MODEL_FILES - {
"src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py",
"src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py",
}
self.assertEqual(model_files, wav2vec2_model_files)
self.assertEqual(wav2vec2_files["module_name"], "wav2vec2")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]}
wav2vec2_test_files = {
"tests/models/wav2vec2/test_feature_extraction_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_wav2vec2.py",
"tests/models/wav2vec2/test_processor_wav2vec2.py",
"tests/models/wav2vec2/test_tokenization_wav2vec2.py",
}
self.assertEqual(test_files, wav2vec2_test_files)
def test_get_model_files_tf_and_flax(self):
# BERT
bert_files = get_model_files("bert", frameworks=["tf", "flax"])
doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]}
bert_model_files = BERT_MODEL_FILES - {"src/transformers/models/bert/modeling_bert.py"}
self.assertEqual(model_files, bert_model_files)
self.assertEqual(bert_files["module_name"], "bert")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]}
bert_test_files = {
"tests/models/bert/test_tokenization_bert.py",
"tests/models/bert/test_modeling_tf_bert.py",
"tests/models/bert/test_modeling_flax_bert.py",
}
self.assertEqual(test_files, bert_test_files)
# VIT
vit_files = get_model_files("vit", frameworks=["tf", "flax"])
doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]}
vit_model_files = VIT_MODEL_FILES - {"src/transformers/models/vit/modeling_vit.py"}
self.assertEqual(model_files, vit_model_files)
self.assertEqual(vit_files["module_name"], "vit")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]}
vit_test_files = {
"tests/models/vit/test_image_processing_vit.py",
"tests/models/vit/test_modeling_tf_vit.py",
"tests/models/vit/test_modeling_flax_vit.py",
}
self.assertEqual(test_files, vit_test_files)
# Wav2Vec2
wav2vec2_files = get_model_files("wav2vec2", frameworks=["tf", "flax"])
doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md")
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]}
wav2vec2_model_files = WAV2VEC2_MODEL_FILES - {"src/transformers/models/wav2vec2/modeling_wav2vec2.py"}
self.assertEqual(model_files, wav2vec2_model_files)
self.assertEqual(wav2vec2_files["module_name"], "wav2vec2")
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]}
wav2vec2_test_files = {
"tests/models/wav2vec2/test_feature_extraction_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_tf_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_flax_wav2vec2.py",
"tests/models/wav2vec2/test_processor_wav2vec2.py",
"tests/models/wav2vec2/test_tokenization_wav2vec2.py",
}
self.assertEqual(test_files, wav2vec2_test_files)
def test_find_base_model_checkpoint(self):
self.assertEqual(find_base_model_checkpoint("bert"), "bert-base-uncased")
self.assertEqual(find_base_model_checkpoint("gpt2"), "gpt2")
def test_retrieve_model_classes(self):
gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2").items()}
expected_gpt_classes = {
"pt": {"GPT2ForTokenClassification", "GPT2Model", "GPT2LMHeadModel", "GPT2ForSequenceClassification"},
"tf": {"TFGPT2Model", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel"},
"flax": {"FlaxGPT2Model", "FlaxGPT2LMHeadModel"},
}
self.assertEqual(gpt_classes, expected_gpt_classes)
del expected_gpt_classes["flax"]
gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2", frameworks=["pt", "tf"]).items()}
self.assertEqual(gpt_classes, expected_gpt_classes)
del expected_gpt_classes["pt"]
gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2", frameworks=["tf"]).items()}
self.assertEqual(gpt_classes, expected_gpt_classes)
def test_retrieve_info_for_model_with_bert(self):
bert_info = retrieve_info_for_model("bert")
bert_classes = [
"BertForTokenClassification",
"BertForQuestionAnswering",
"BertForNextSentencePrediction",
"BertForSequenceClassification",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertModel",
"BertForPreTraining",
"BertLMHeadModel",
]
expected_model_classes = {
"pt": set(bert_classes),
"tf": {f"TF{m}" for m in bert_classes},
"flax": {f"Flax{m}" for m in bert_classes[:-1] + ["BertForCausalLM"]},
}
self.assertEqual(set(bert_info["frameworks"]), {"pt", "tf", "flax"})
model_classes = {k: set(v) for k, v in bert_info["model_classes"].items()}
self.assertEqual(model_classes, expected_model_classes)
all_bert_files = bert_info["model_files"]
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["model_files"]}
self.assertEqual(model_files, BERT_MODEL_FILES)
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["test_files"]}
bert_test_files = {
"tests/models/bert/test_tokenization_bert.py",
"tests/models/bert/test_modeling_bert.py",
"tests/models/bert/test_modeling_tf_bert.py",
"tests/models/bert/test_modeling_flax_bert.py",
}
self.assertEqual(test_files, bert_test_files)
doc_file = str(Path(all_bert_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
self.assertEqual(all_bert_files["module_name"], "bert")
bert_model_patterns = bert_info["model_patterns"]
self.assertEqual(bert_model_patterns.model_name, "BERT")
self.assertEqual(bert_model_patterns.checkpoint, "bert-base-uncased")
self.assertEqual(bert_model_patterns.model_type, "bert")
self.assertEqual(bert_model_patterns.model_lower_cased, "bert")
self.assertEqual(bert_model_patterns.model_camel_cased, "Bert")
self.assertEqual(bert_model_patterns.model_upper_cased, "BERT")
self.assertEqual(bert_model_patterns.config_class, "BertConfig")
self.assertEqual(bert_model_patterns.tokenizer_class, "BertTokenizer")
self.assertIsNone(bert_model_patterns.feature_extractor_class)
self.assertIsNone(bert_model_patterns.processor_class)
def test_retrieve_info_for_model_pt_tf_with_bert(self):
bert_info = retrieve_info_for_model("bert", frameworks=["pt", "tf"])
bert_classes = [
"BertForTokenClassification",
"BertForQuestionAnswering",
"BertForNextSentencePrediction",
"BertForSequenceClassification",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertModel",
"BertForPreTraining",
"BertLMHeadModel",
]
expected_model_classes = {"pt": set(bert_classes), "tf": {f"TF{m}" for m in bert_classes}}
self.assertEqual(set(bert_info["frameworks"]), {"pt", "tf"})
model_classes = {k: set(v) for k, v in bert_info["model_classes"].items()}
self.assertEqual(model_classes, expected_model_classes)
all_bert_files = bert_info["model_files"]
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["model_files"]}
bert_model_files = BERT_MODEL_FILES - {"src/transformers/models/bert/modeling_flax_bert.py"}
self.assertEqual(model_files, bert_model_files)
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["test_files"]}
bert_test_files = {
"tests/models/bert/test_tokenization_bert.py",
"tests/models/bert/test_modeling_bert.py",
"tests/models/bert/test_modeling_tf_bert.py",
}
self.assertEqual(test_files, bert_test_files)
doc_file = str(Path(all_bert_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
self.assertEqual(all_bert_files["module_name"], "bert")
bert_model_patterns = bert_info["model_patterns"]
self.assertEqual(bert_model_patterns.model_name, "BERT")
self.assertEqual(bert_model_patterns.checkpoint, "bert-base-uncased")
self.assertEqual(bert_model_patterns.model_type, "bert")
self.assertEqual(bert_model_patterns.model_lower_cased, "bert")
self.assertEqual(bert_model_patterns.model_camel_cased, "Bert")
self.assertEqual(bert_model_patterns.model_upper_cased, "BERT")
self.assertEqual(bert_model_patterns.config_class, "BertConfig")
self.assertEqual(bert_model_patterns.tokenizer_class, "BertTokenizer")
self.assertIsNone(bert_model_patterns.feature_extractor_class)
self.assertIsNone(bert_model_patterns.processor_class)
def test_retrieve_info_for_model_with_vit(self):
vit_info = retrieve_info_for_model("vit")
vit_classes = ["ViTForImageClassification", "ViTModel"]
pt_only_classes = ["ViTForMaskedImageModeling"]
expected_model_classes = {
"pt": set(vit_classes + pt_only_classes),
"tf": {f"TF{m}" for m in vit_classes},
"flax": {f"Flax{m}" for m in vit_classes},
}
self.assertEqual(set(vit_info["frameworks"]), {"pt", "tf", "flax"})
model_classes = {k: set(v) for k, v in vit_info["model_classes"].items()}
self.assertEqual(model_classes, expected_model_classes)
all_vit_files = vit_info["model_files"]
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_vit_files["model_files"]}
self.assertEqual(model_files, VIT_MODEL_FILES)
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_vit_files["test_files"]}
vit_test_files = {
"tests/models/vit/test_image_processing_vit.py",
"tests/models/vit/test_modeling_vit.py",
"tests/models/vit/test_modeling_tf_vit.py",
"tests/models/vit/test_modeling_flax_vit.py",
}
self.assertEqual(test_files, vit_test_files)
doc_file = str(Path(all_vit_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md")
self.assertEqual(all_vit_files["module_name"], "vit")
vit_model_patterns = vit_info["model_patterns"]
self.assertEqual(vit_model_patterns.model_name, "ViT")
self.assertEqual(vit_model_patterns.checkpoint, "google/vit-base-patch16-224-in21k")
self.assertEqual(vit_model_patterns.model_type, "vit")
self.assertEqual(vit_model_patterns.model_lower_cased, "vit")
self.assertEqual(vit_model_patterns.model_camel_cased, "ViT")
self.assertEqual(vit_model_patterns.model_upper_cased, "VIT")
self.assertEqual(vit_model_patterns.config_class, "ViTConfig")
self.assertEqual(vit_model_patterns.feature_extractor_class, "ViTFeatureExtractor")
self.assertEqual(vit_model_patterns.image_processor_class, "ViTImageProcessor")
self.assertIsNone(vit_model_patterns.tokenizer_class)
self.assertIsNone(vit_model_patterns.processor_class)
def test_retrieve_info_for_model_with_wav2vec2(self):
wav2vec2_info = retrieve_info_for_model("wav2vec2")
wav2vec2_classes = [
"Wav2Vec2Model",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
]
expected_model_classes = {
"pt": set(wav2vec2_classes),
"tf": {f"TF{m}" for m in wav2vec2_classes[:1]},
"flax": {f"Flax{m}" for m in wav2vec2_classes[:2]},
}
self.assertEqual(set(wav2vec2_info["frameworks"]), {"pt", "tf", "flax"})
model_classes = {k: set(v) for k, v in wav2vec2_info["model_classes"].items()}
self.assertEqual(model_classes, expected_model_classes)
all_wav2vec2_files = wav2vec2_info["model_files"]
model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_wav2vec2_files["model_files"]}
self.assertEqual(model_files, WAV2VEC2_MODEL_FILES)
test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_wav2vec2_files["test_files"]}
wav2vec2_test_files = {
"tests/models/wav2vec2/test_feature_extraction_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_tf_wav2vec2.py",
"tests/models/wav2vec2/test_modeling_flax_wav2vec2.py",
"tests/models/wav2vec2/test_processor_wav2vec2.py",
"tests/models/wav2vec2/test_tokenization_wav2vec2.py",
}
self.assertEqual(test_files, wav2vec2_test_files)
doc_file = str(Path(all_wav2vec2_files["doc_file"]).relative_to(REPO_PATH))
self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md")
self.assertEqual(all_wav2vec2_files["module_name"], "wav2vec2")
wav2vec2_model_patterns = wav2vec2_info["model_patterns"]
self.assertEqual(wav2vec2_model_patterns.model_name, "Wav2Vec2")
self.assertEqual(wav2vec2_model_patterns.checkpoint, "facebook/wav2vec2-base-960h")
self.assertEqual(wav2vec2_model_patterns.model_type, "wav2vec2")
self.assertEqual(wav2vec2_model_patterns.model_lower_cased, "wav2vec2")
self.assertEqual(wav2vec2_model_patterns.model_camel_cased, "Wav2Vec2")
self.assertEqual(wav2vec2_model_patterns.model_upper_cased, "WAV_2_VEC_2")
self.assertEqual(wav2vec2_model_patterns.config_class, "Wav2Vec2Config")
self.assertEqual(wav2vec2_model_patterns.feature_extractor_class, "Wav2Vec2FeatureExtractor")
self.assertEqual(wav2vec2_model_patterns.processor_class, "Wav2Vec2Processor")
self.assertEqual(wav2vec2_model_patterns.tokenizer_class, "Wav2Vec2CTCTokenizer")
def test_clean_frameworks_in_init_with_gpt(self):
test_init = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"],
"tokenization_gpt2": ["GPT2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt2"] = ["GPT2Model"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_gpt2"] = ["TFGPT2Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_gpt2"] = ["FlaxGPT2Model"]
if TYPE_CHECKING:
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig
from .tokenization_gpt2 import GPT2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt2_fast import GPT2TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt2 import GPT2Model
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_gpt2 import TFGPT2Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt2 import FlaxGPT2Model
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_no_tokenizer = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available
_import_structure = {
"configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt2"] = ["GPT2Model"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_gpt2"] = ["TFGPT2Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_gpt2"] = ["FlaxGPT2Model"]
if TYPE_CHECKING:
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt2 import GPT2Model
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_gpt2 import TFGPT2Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt2 import FlaxGPT2Model
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_pt_only = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"],
"tokenization_gpt2": ["GPT2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt2"] = ["GPT2Model"]
if TYPE_CHECKING:
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig
from .tokenization_gpt2 import GPT2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt2_fast import GPT2TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt2 import GPT2Model
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_pt_only_no_tokenizer = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_torch_available
_import_structure = {
"configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt2"] = ["GPT2Model"]
if TYPE_CHECKING:
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt2 import GPT2Model
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
with tempfile.TemporaryDirectory() as tmp_dir:
file_name = os.path.join(tmp_dir, "../__init__.py")
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, keep_processing=False)
self.check_result(file_name, init_no_tokenizer)
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, frameworks=["pt"])
self.check_result(file_name, init_pt_only)
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, frameworks=["pt"], keep_processing=False)
self.check_result(file_name, init_pt_only_no_tokenizer)
def test_clean_frameworks_in_init_with_vit(self):
test_init = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available
_import_structure = {
"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = ["ViTModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vit"] = ["TFViTModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_vit"] = ["FlaxViTModel"]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import ViTModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_no_feature_extractor = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available
_import_structure = {
"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = ["ViTModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vit"] = ["TFViTModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_vit"] = ["FlaxViTModel"]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import ViTModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_pt_only = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = ["ViTModel"]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import ViTModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
init_pt_only_no_feature_extractor = """
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_torch_available
_import_structure = {
"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = ["ViTModel"]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import ViTModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""
with tempfile.TemporaryDirectory() as tmp_dir:
file_name = os.path.join(tmp_dir, "../__init__.py")
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, keep_processing=False)
self.check_result(file_name, init_no_feature_extractor)
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, frameworks=["pt"])
self.check_result(file_name, init_pt_only)
self.init_file(file_name, test_init)
clean_frameworks_in_init(file_name, frameworks=["pt"], keep_processing=False)
self.check_result(file_name, init_pt_only_no_feature_extractor)
def test_duplicate_doc_file(self):
test_doc = """
# GPT2
## Overview
Overview of the model.
## GPT2Config
[[autodoc]] GPT2Config
## GPT2Tokenizer
[[autodoc]] GPT2Tokenizer
- save_vocabulary
## GPT2TokenizerFast
[[autodoc]] GPT2TokenizerFast
## GPT2 specific outputs
[[autodoc]] models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput
[[autodoc]] models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput
## GPT2Model
[[autodoc]] GPT2Model
- forward
## TFGPT2Model
[[autodoc]] TFGPT2Model
- call
## FlaxGPT2Model
[[autodoc]] FlaxGPT2Model
- __call__
"""
test_new_doc = """
# GPT-New New
## Overview
The GPT-New New model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>
The abstract from the paper is the following:
*<INSERT PAPER ABSTRACT HERE>*
Tips:
<INSERT TIPS ABOUT MODEL HERE>
This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
## GPTNewNewConfig
[[autodoc]] GPTNewNewConfig
## GPTNewNewTokenizer
[[autodoc]] GPTNewNewTokenizer
- save_vocabulary
## GPTNewNewTokenizerFast
[[autodoc]] GPTNewNewTokenizerFast
## GPTNewNew specific outputs
[[autodoc]] models.gpt_new_new.modeling_gpt_new_new.GPTNewNewDoubleHeadsModelOutput
[[autodoc]] models.gpt_new_new.modeling_tf_gpt_new_new.TFGPTNewNewDoubleHeadsModelOutput
## GPTNewNewModel
[[autodoc]] GPTNewNewModel
- forward
## TFGPTNewNewModel
[[autodoc]] TFGPTNewNewModel
- call
## FlaxGPTNewNewModel
[[autodoc]] FlaxGPTNewNewModel
- __call__
"""
with tempfile.TemporaryDirectory() as tmp_dir:
doc_file = os.path.join(tmp_dir, "gpt2.md")
new_doc_file = os.path.join(tmp_dir, "gpt-new-new.md")
gpt2_model_patterns = ModelPatterns("GPT2", "gpt2", tokenizer_class="GPT2Tokenizer")
new_model_patterns = ModelPatterns(
"GPT-New New", "huggingface/gpt-new-new", tokenizer_class="GPTNewNewTokenizer"
)
self.init_file(doc_file, test_doc)
duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns)
self.check_result(new_doc_file, test_new_doc)
test_new_doc_pt_only = test_new_doc.replace(
"""
## TFGPTNewNewModel
[[autodoc]] TFGPTNewNewModel
- call
## FlaxGPTNewNewModel
[[autodoc]] FlaxGPTNewNewModel
- __call__
""",
"",
)
self.init_file(doc_file, test_doc)
duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns, frameworks=["pt"])
self.check_result(new_doc_file, test_new_doc_pt_only)
test_new_doc_no_tok = test_new_doc.replace(
"""
## GPTNewNewTokenizer
[[autodoc]] GPTNewNewTokenizer
- save_vocabulary
## GPTNewNewTokenizerFast
[[autodoc]] GPTNewNewTokenizerFast
""",
"",
)
new_model_patterns = ModelPatterns(
"GPT-New New", "huggingface/gpt-new-new", tokenizer_class="GPT2Tokenizer"
)
self.init_file(doc_file, test_doc)
duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns)
print(test_new_doc_no_tok)
self.check_result(new_doc_file, test_new_doc_no_tok)
test_new_doc_pt_only_no_tok = test_new_doc_no_tok.replace(
"""
## TFGPTNewNewModel
[[autodoc]] TFGPTNewNewModel
- call
## FlaxGPTNewNewModel
[[autodoc]] FlaxGPTNewNewModel
- __call__
""",
"",
)
self.init_file(doc_file, test_doc)
duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns, frameworks=["pt"])
self.check_result(new_doc_file, test_new_doc_pt_only_no_tok)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_image_processing_utils.py | # coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_processing_utils import get_size_dict
class ImageProcessingUtilsTester(unittest.TestCase):
def test_get_size_dict(self):
# Test a dict with the wrong keys raises an error
inputs = {"wrong_key": 224}
with self.assertRaises(ValueError):
get_size_dict(inputs)
inputs = {"height": 224}
with self.assertRaises(ValueError):
get_size_dict(inputs)
inputs = {"width": 224, "shortest_edge": 224}
with self.assertRaises(ValueError):
get_size_dict(inputs)
# Test a dict with the correct keys is returned as is
inputs = {"height": 224, "width": 224}
outputs = get_size_dict(inputs)
self.assertEqual(outputs, inputs)
inputs = {"shortest_edge": 224}
outputs = get_size_dict(inputs)
self.assertEqual(outputs, {"shortest_edge": 224})
inputs = {"longest_edge": 224, "shortest_edge": 224}
outputs = get_size_dict(inputs)
self.assertEqual(outputs, {"longest_edge": 224, "shortest_edge": 224})
# Test a single int value which represents (size, size)
outputs = get_size_dict(224)
self.assertEqual(outputs, {"height": 224, "width": 224})
# Test a single int value which represents the shortest edge
outputs = get_size_dict(224, default_to_square=False)
self.assertEqual(outputs, {"shortest_edge": 224})
# Test a tuple of ints which represents (height, width)
outputs = get_size_dict((150, 200))
self.assertEqual(outputs, {"height": 150, "width": 200})
# Test a tuple of ints which represents (width, height)
outputs = get_size_dict((150, 200), height_width_order=False)
self.assertEqual(outputs, {"height": 200, "width": 150})
# Test an int representing the shortest edge and max_size which represents the longest edge
outputs = get_size_dict(224, max_size=256, default_to_square=False)
self.assertEqual(outputs, {"shortest_edge": 224, "longest_edge": 256})
# Test int with default_to_square=True and max_size fails
with self.assertRaises(ValueError):
get_size_dict(224, max_size=256, default_to_square=True)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_audio_utils.py | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
from transformers.audio_utils import (
amplitude_to_db,
hertz_to_mel,
mel_filter_bank,
mel_to_hertz,
power_to_db,
spectrogram,
window_function,
)
class AudioUtilsFunctionTester(unittest.TestCase):
def test_hertz_to_mel(self):
self.assertEqual(hertz_to_mel(0.0), 0.0)
self.assertAlmostEqual(hertz_to_mel(100), 150.48910241)
inputs = np.array([100, 200])
expected = np.array([150.48910241, 283.22989816])
self.assertTrue(np.allclose(hertz_to_mel(inputs), expected))
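# The default scale above appears to be the standard HTK mel mapping,
# mel = 2595 * log10(1 + hertz / 700); e.g. 2595 * log10(1 + 100 / 700) ~= 150.489,
# which is consistent with the expected values checked above.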
self.assertEqual(hertz_to_mel(0.0, "slaney"), 0.0)
self.assertEqual(hertz_to_mel(100, "slaney"), 1.5)
inputs = np.array([60, 100, 200, 1000, 1001, 2000])
expected = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
self.assertTrue(np.allclose(hertz_to_mel(inputs, "slaney"), expected))
inputs = np.array([60, 100, 200, 1000, 1001, 2000])
expected = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
self.assertTrue(np.allclose(hertz_to_mel(inputs, "kaldi"), expected))
with pytest.raises(ValueError):
hertz_to_mel(100, mel_scale=None)
def test_mel_to_hertz(self):
self.assertEqual(mel_to_hertz(0.0), 0.0)
self.assertAlmostEqual(mel_to_hertz(150.48910241), 100)
inputs = np.array([150.48910241, 283.22989816])
expected = np.array([100, 200])
self.assertTrue(np.allclose(mel_to_hertz(inputs), expected))
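# The inverse of the default scale appears to be hertz = 700 * (10 ** (mel / 2595) - 1);
# e.g. 700 * (10 ** (150.489 / 2595) - 1) ~= 100 Hz, matching the check above.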
self.assertEqual(mel_to_hertz(0.0, "slaney"), 0.0)
self.assertEqual(mel_to_hertz(1.5, "slaney"), 100)
inputs = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
expected = np.array([60, 100, 200, 1000, 1001, 2000])
self.assertTrue(np.allclose(mel_to_hertz(inputs, "slaney"), expected))
inputs = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
expected = np.array([60, 100, 200, 1000, 1001, 2000])
self.assertTrue(np.allclose(mel_to_hertz(inputs, "kaldi"), expected))
with pytest.raises(ValueError):
mel_to_hertz(100, mel_scale=None)
def test_mel_filter_bank_shape(self):
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm=None,
mel_scale="htk",
)
self.assertEqual(mel_filters.shape, (513, 13))
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm="slaney",
mel_scale="slaney",
)
self.assertEqual(mel_filters.shape, (513, 13))
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm="slaney",
mel_scale="slaney",
triangularize_in_mel_space=True,
)
self.assertEqual(mel_filters.shape, (513, 13))
def test_mel_filter_bank_htk(self):
mel_filters = mel_filter_bank(
num_frequency_bins=16,
num_mel_filters=4,
min_frequency=0,
max_frequency=2000,
sampling_rate=4000,
norm=None,
mel_scale="htk",
)
# fmt: off
expected = np.array([
[0.0 , 0.0 , 0.0 , 0.0 ],
[0.61454786, 0.0 , 0.0 , 0.0 ],
[0.82511046, 0.17488954, 0.0 , 0.0 ],
[0.35597035, 0.64402965, 0.0 , 0.0 ],
[0.0 , 0.91360726, 0.08639274, 0.0 ],
[0.0 , 0.55547007, 0.44452993, 0.0 ],
[0.0 , 0.19733289, 0.80266711, 0.0 ],
[0.0 , 0.0 , 0.87724349, 0.12275651],
[0.0 , 0.0 , 0.6038449 , 0.3961551 ],
[0.0 , 0.0 , 0.33044631, 0.66955369],
[0.0 , 0.0 , 0.05704771, 0.94295229],
[0.0 , 0.0 , 0.0 , 0.83483975],
[0.0 , 0.0 , 0.0 , 0.62612982],
[0.0 , 0.0 , 0.0 , 0.41741988],
[0.0 , 0.0 , 0.0 , 0.20870994],
[0.0 , 0.0 , 0.0 , 0.0 ]
])
# fmt: on
self.assertTrue(np.allclose(mel_filters, expected))
def test_mel_filter_bank_slaney(self):
mel_filters = mel_filter_bank(
num_frequency_bins=16,
num_mel_filters=4,
min_frequency=0,
max_frequency=2000,
sampling_rate=4000,
norm=None,
mel_scale="slaney",
)
# fmt: off
expected = np.array([
[0.0 , 0.0 , 0.0 , 0.0 ],
[0.39869419, 0.0 , 0.0 , 0.0 ],
[0.79738839, 0.0 , 0.0 , 0.0 ],
[0.80391742, 0.19608258, 0.0 , 0.0 ],
[0.40522322, 0.59477678, 0.0 , 0.0 ],
[0.00652903, 0.99347097, 0.0 , 0.0 ],
[0.0 , 0.60796161, 0.39203839, 0.0 ],
[0.0 , 0.20939631, 0.79060369, 0.0 ],
[0.0 , 0.0 , 0.84685344, 0.15314656],
[0.0 , 0.0 , 0.52418477, 0.47581523],
[0.0 , 0.0 , 0.2015161 , 0.7984839 ],
[0.0 , 0.0 , 0.0 , 0.9141874 ],
[0.0 , 0.0 , 0.0 , 0.68564055],
[0.0 , 0.0 , 0.0 , 0.4570937 ],
[0.0 , 0.0 , 0.0 , 0.22854685],
[0.0 , 0.0 , 0.0 , 0.0 ]
])
# fmt: on
self.assertTrue(np.allclose(mel_filters, expected))
def test_mel_filter_bank_kaldi(self):
mel_filters = mel_filter_bank(
num_frequency_bins=16,
num_mel_filters=4,
min_frequency=0,
max_frequency=2000,
sampling_rate=4000,
norm=None,
mel_scale="kaldi",
triangularize_in_mel_space=True,
)
# fmt: off
expected = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000],
[0.6086, 0.0000, 0.0000, 0.0000],
[0.8689, 0.1311, 0.0000, 0.0000],
[0.4110, 0.5890, 0.0000, 0.0000],
[0.0036, 0.9964, 0.0000, 0.0000],
[0.0000, 0.6366, 0.3634, 0.0000],
[0.0000, 0.3027, 0.6973, 0.0000],
[0.0000, 0.0000, 0.9964, 0.0036],
[0.0000, 0.0000, 0.7135, 0.2865],
[0.0000, 0.0000, 0.4507, 0.5493],
[0.0000, 0.0000, 0.2053, 0.7947],
[0.0000, 0.0000, 0.0000, 0.9752],
[0.0000, 0.0000, 0.0000, 0.7585],
[0.0000, 0.0000, 0.0000, 0.5539],
[0.0000, 0.0000, 0.0000, 0.3599],
[0.0000, 0.0000, 0.0000, 0.1756]]
)
# fmt: on
self.assertTrue(np.allclose(mel_filters, expected, atol=5e-5))
def test_mel_filter_bank_slaney_norm(self):
mel_filters = mel_filter_bank(
num_frequency_bins=16,
num_mel_filters=4,
min_frequency=0,
max_frequency=2000,
sampling_rate=4000,
norm="slaney",
mel_scale="slaney",
)
# fmt: off
expected = np.array([
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[1.19217795e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[2.38435591e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[2.40387905e-03, 5.86232616e-04, 0.00000000e+00, 0.00000000e+00],
[1.21170110e-03, 1.77821783e-03, 0.00000000e+00, 0.00000000e+00],
[1.95231437e-05, 2.97020305e-03, 0.00000000e+00, 0.00000000e+00],
[0.00000000e+00, 1.81763684e-03, 1.04857612e-03, 0.00000000e+00],
[0.00000000e+00, 6.26036972e-04, 2.11460963e-03, 0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 2.26505954e-03, 3.07332945e-04],
[0.00000000e+00, 0.00000000e+00, 1.40202503e-03, 9.54861093e-04],
[0.00000000e+00, 0.00000000e+00, 5.38990521e-04, 1.60238924e-03],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.83458185e-03],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37593638e-03],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 9.17290923e-04],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.58645462e-04],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]
])
# fmt: on
self.assertTrue(np.allclose(mel_filters, expected))
def test_window_function(self):
window = window_function(16, "hann")
self.assertEqual(len(window), 16)
# fmt: off
expected = np.array([
0.0, 0.03806023, 0.14644661, 0.30865828, 0.5, 0.69134172, 0.85355339, 0.96193977,
1.0, 0.96193977, 0.85355339, 0.69134172, 0.5, 0.30865828, 0.14644661, 0.03806023,
])
# fmt: on
self.assertTrue(np.allclose(window, expected))
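# Sanity check for the expected values (assuming the default periodic Hann window):
# w[n] = 0.5 * (1 - cos(2 * pi * n / 16)), so w[4] = 0.5 and w[8] = 1.0 as listed above.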
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_spectrogram_impulse(self):
waveform = np.zeros(40)
waveform[9] = 1.0 # impulse shifted in time
spec = spectrogram(
waveform,
window_function(12, "hann", frame_length=16),
frame_length=16,
hop_length=4,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (9, 11))
expected = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
self.assertTrue(np.allclose(spec, expected))
def test_spectrogram_integration_test(self):
waveform = self._load_datasamples(1)[0]
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=128,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (257, 732))
# fmt: off
expected = np.array([
0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 ,
0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357,
0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792,
0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558,
0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598,
0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042,
0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293,
0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 ,
0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666,
0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069,
0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424,
0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637,
0.0293578 , 0.03452379, 0.02194803, 0.01676056,
])
# fmt: on
self.assertTrue(np.allclose(spec[:64, 400], expected))
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
fft_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (257, 732))
self.assertTrue(np.allclose(spec[:64, 400], expected))
mel_filters = mel_filter_bank(
num_frequency_bins=256,
num_mel_filters=400,
min_frequency=20,
max_frequency=8000,
sampling_rate=16000,
norm=None,
mel_scale="kaldi",
triangularize_in_mel_space=True,
)
mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
spec = spectrogram(
waveform,
window_function(400, "povey", periodic=False),
frame_length=400,
hop_length=160,
fft_length=512,
power=2.0,
center=False,
pad_mode="reflect",
onesided=True,
preemphasis=0.97,
mel_filters=mel_filters,
log_mel="log",
mel_floor=1.1920928955078125e-07,
remove_dc_offset=True,
)
self.assertEqual(spec.shape, (400, 584))
# fmt: off
expected = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515,
-15.94238515, -15.94238515, -15.94238515, -15.94238515,
-6.52463769, -7.73677889, -15.94238515, -15.94238515,
-15.94238515, -15.94238515, -4.18650018, -3.37195286,
-15.94238515, -15.94238515, -15.94238515, -15.94238515,
-4.70190154, -2.4217066 , -15.94238515, -15.94238515,
-15.94238515, -15.94238515, -5.62755239, -3.53385194,
-15.94238515, -15.94238515, -15.94238515, -15.94238515,
-9.43303023, -8.77480925, -15.94238515, -15.94238515,
-15.94238515, -15.94238515, -4.2951092 , -5.51585994,
-15.94238515, -15.94238515, -15.94238515, -4.40151721,
-3.95228878, -15.94238515, -15.94238515, -15.94238515,
-6.10365415, -4.59494697, -15.94238515, -15.94238515,
-15.94238515, -8.10727767, -6.2585298 , -15.94238515,
-15.94238515, -15.94238515, -5.60161702, -4.47217004,
-15.94238515, -15.94238515, -15.94238515, -5.91641988]
)
# fmt: on
self.assertTrue(np.allclose(spec[:64, 400], expected, atol=1e-5))
def test_spectrogram_center_padding(self):
waveform = self._load_datasamples(1)[0]
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=128,
center=True,
pad_mode="reflect",
)
self.assertEqual(spec.shape, (257, 732))
# fmt: off
expected = np.array([
0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202,
0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668,
0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998,
0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725,
0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529,
0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339,
0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734,
0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963,
0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949,
0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404,
0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862,
0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 ,
0.00217659, 0.00276204, 0.00260835, 0.00299299,
])
# fmt: on
self.assertTrue(np.allclose(spec[:64, 0], expected))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=128,
center=True,
pad_mode="constant",
)
self.assertEqual(spec.shape, (257, 732))
# fmt: off
expected = np.array([
0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115,
0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055,
0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367,
0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621,
0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947,
0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912,
0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984,
0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813,
0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781,
0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 ,
0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322,
0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599,
0.00788239, 0.00664407, 0.00824227, 0.00628301,
])
# fmt: on
self.assertTrue(np.allclose(spec[:64, 0], expected))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=128,
center=False,
)
self.assertEqual(spec.shape, (257, 728))
# fmt: off
expected = np.array([
0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727,
0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 ,
0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623,
0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692,
0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 ,
0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883,
0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801,
0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778,
0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908,
0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476,
0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512,
0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876,
0.00811857, 0.00538216, 0.00685749, 0.00535275,
])
# fmt: on
self.assertTrue(np.allclose(spec[:64, 0], expected))
def test_spectrogram_shapes(self):
waveform = self._load_datasamples(1)[0]
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (201, 732))
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=False,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (201, 729))
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
fft_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (257, 732))
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 1464))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 1464))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 183))
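# The shapes checked in this test appear to follow the usual STFT conventions:
# number of frequency bins is fft_length // 2 + 1 when onesided=True (with fft_length
# defaulting to frame_length) and fft_length otherwise; number of frames is
# 1 + len(waveform) // hop_length with center=True and
# 1 + (len(waveform) - frame_length) // hop_length with center=False.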
def test_mel_spectrogram(self):
waveform = self._load_datasamples(1)[0]
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm=None,
mel_scale="htk",
)
self.assertEqual(mel_filters.shape, (513, 13))
spec = spectrogram(
waveform,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
)
self.assertEqual(spec.shape, (513, 732))
spec = spectrogram(
waveform,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
mel_filters=mel_filters,
)
self.assertEqual(spec.shape, (13, 732))
# fmt: off
expected = np.array([
1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01,
8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03,
7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03,
9.44153646e-04
])
# fmt: on
self.assertTrue(np.allclose(spec[:, 300], expected))
def test_spectrogram_power(self):
waveform = self._load_datasamples(1)[0]
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=128,
power=None,
)
self.assertEqual(spec.shape, (257, 732))
self.assertEqual(spec.dtype, np.complex64)
# fmt: off
expected = np.array([
0.01452305+0.01820039j, -0.01737362-0.01641946j,
0.0121028 +0.01565081j, -0.02794554-0.03021514j,
0.04719803+0.04086519j, -0.04391563-0.02779365j,
0.05682834+0.01571325j, -0.08604821-0.02023657j,
0.07497991+0.0186641j , -0.06366091-0.00922475j,
0.11003416+0.0114788j , -0.13677941-0.01523552j,
0.10934535-0.00117226j, -0.11635598+0.02551187j,
0.14708674-0.03469823j, -0.1328196 +0.06034218j,
0.12667368-0.13973421j, -0.14764774+0.18912019j,
0.10235471-0.12181523j, -0.00773012+0.04730498j,
-0.01487191-0.07312611j, -0.02739162+0.09619419j,
0.02895459-0.05398273j, 0.01198589+0.05276592j,
-0.02117299-0.10123465j, 0.00666388+0.09526499j,
-0.01672773-0.05649684j, 0.02723125+0.05939891j,
-0.01879361-0.062954j , 0.03686557+0.04568823j,
-0.07394181-0.07949649j, 0.06238583+0.13905765j,
])
# fmt: on
self.assertTrue(np.allclose(spec[64:96, 321], expected))
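# The calls in this test differ only in `power`: power=None (above) returns the complex
# STFT, power=1.0 appears to be its magnitude (e.g. abs(0.01452305 + 0.01820039j) ~= 0.023285,
# the first expected value below) and power=2.0 its squared magnitude
# (0.02328461 ** 2 ~= 5.4217e-04).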
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=128,
power=1.0,
)
self.assertEqual(spec.shape, (257, 732))
self.assertEqual(spec.dtype, np.float64)
# fmt: off
expected = np.array([
0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 ,
0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579,
0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405,
0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241,
0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509,
0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678,
0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171,
0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861,
0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 ,
0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615,
0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328,
0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876,
0.12322842, 0.1621659 , 0.12334293, 0.06033659,
])
# fmt: on
self.assertTrue(np.allclose(spec[64:128, 321], expected))
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=128,
power=2.0,
)
self.assertEqual(spec.shape, (257, 732))
self.assertEqual(spec.dtype, np.float64)
# fmt: off
expected = np.array([
5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03,
3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03,
5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02,
1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02,
3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03,
5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 2.92790355e-03,
1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03,
4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02,
1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02,
1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03,
6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05,
1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04,
2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04,
2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02,
5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02,
1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03,
])
# fmt: on
self.assertTrue(np.allclose(spec[64:128, 321], expected))
def test_power_to_db(self):
spectrogram = np.zeros((2, 3))
spectrogram[0, 0] = 2.0
spectrogram[0, 1] = 0.5
spectrogram[0, 2] = 0.707
spectrogram[1, 1] = 1.0
output = power_to_db(spectrogram, reference=1.0)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-100.0, 0.0, -100.0]])
self.assertTrue(np.allclose(output, expected))
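# power_to_db appears to compute 10 * log10(power / reference) with clamping:
# 10 * log10(2.0) ~= 3.0103 and 10 * log10(0.5) ~= -3.0103 as in the expected array above,
# with zeros clipped at -100 dB for the default min_value and the floor raised to
# max - db_range when db_range is given.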
output = power_to_db(spectrogram, reference=2.0)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, min_value=1e-6)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-60.0, 0.0, -60.0]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, db_range=80)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0.0, -76.98970004]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, reference=2.0, db_range=80)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-80.0, -3.01029996, -80.0]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, reference=2.0, min_value=1e-6, db_range=80)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]])
self.assertTrue(np.allclose(output, expected))
with pytest.raises(ValueError):
power_to_db(spectrogram, reference=0.0)
with pytest.raises(ValueError):
power_to_db(spectrogram, min_value=0.0)
with pytest.raises(ValueError):
power_to_db(spectrogram, db_range=-80)
def test_amplitude_to_db(self):
spectrogram = np.zeros((2, 3))
spectrogram[0, 0] = 2.0
spectrogram[0, 1] = 0.5
spectrogram[0, 2] = 0.707
spectrogram[1, 1] = 1.0
output = amplitude_to_db(spectrogram, reference=1.0)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-100.0, 0.0, -100.0]])
self.assertTrue(np.allclose(output, expected))
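# amplitude_to_db appears to compute 20 * log10(amplitude / reference):
# 20 * log10(2.0) ~= 6.0206 and 20 * log10(0.5) ~= -6.0206 as in the expected array above,
# with zeros clipped at -100 dB for the default min_value.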
output = amplitude_to_db(spectrogram, reference=2.0)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, min_value=1e-3)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-60.0, 0.0, -60.0]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, db_range=80)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0.0, -73.97940009]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, reference=2.0, db_range=80)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-80.0, -6.02059991, -80.0]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, reference=2.0, min_value=1e-3, db_range=80)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]])
self.assertTrue(np.allclose(output, expected))
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, reference=0.0)
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, min_value=0.0)
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, db_range=-80)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_skip_decorators.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# this test validates that we can stack skip decorators in groups and whether
# they work correctly with other decorators
#
# since the decorators have already built their decision params (like checking
# env[]), we can't mock the env and test each of the combinations, so ideally
# the following 4 should be run. But since we have different CI jobs running
# different configs, all combinations should get covered
#
# RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
import os
import unittest
import pytest
from parameterized import parameterized
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
# skipping in unittest tests
params = [(1,)]
# test that we can stack our skip decorators with 3rd party decorators
def check_slow():
run_slow = bool(os.getenv("RUN_SLOW", 0))
if run_slow:
assert True
else:
assert False, "should have been skipped"
# test that we can stack our skip decorators
def check_slow_torch_cuda():
run_slow = bool(os.getenv("RUN_SLOW", 0))
if run_slow and torch_device == "cuda":
assert True
else:
assert False, "should have been skipped"
@require_torch
class SkipTester(unittest.TestCase):
@slow
@require_torch_gpu
def test_2_skips_slow_first(self):
check_slow_torch_cuda()
@require_torch_gpu
@slow
def test_2_skips_slow_last(self):
check_slow_torch_cuda()
# The combination of any skip decorator, followed by parameterized fails to skip the tests
# 1. @slow manages to correctly skip `test_param_slow_first`
# 2. but then `parameterized` creates new tests, with a unique name for each parameter group.
# It has no idea that they are to be skipped and so they all run, ignoring @slow
# Therefore skip decorators must come after `parameterized`
#
# @slow
# @parameterized.expand(params)
# def test_param_slow_first(self, param=None):
# check_slow()
# This works as expected:
# 1. `parameterized` creates new tests with unique names
# 2. each of them gets an opportunity to be skipped
@parameterized.expand(params)
@slow
def test_param_slow_last(self, param=None):
check_slow()
# skipping in non-unittest tests
# no problem at all here
@slow
@require_torch_gpu
def test_pytest_2_skips_slow_first():
check_slow_torch_cuda()
@require_torch_gpu
@slow
def test_pytest_2_skips_slow_last():
check_slow_torch_cuda()
@slow
@pytest.mark.parametrize("param", [1])
def test_pytest_param_slow_first(param):
check_slow()
@pytest.mark.parametrize("param", [1])
@slow
def test_pytest_param_slow_last(param):
check_slow()
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_cli.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from unittest.mock import patch
from transformers.testing_utils import CaptureStd, is_pt_tf_cross_test, require_torch
class CLITest(unittest.TestCase):
@patch("sys.argv", ["fakeprogrampath", "env"])
def test_cli_env(self):
# test transformers-cli env
import transformers.commands.transformers_cli
with CaptureStd() as cs:
transformers.commands.transformers_cli.main()
self.assertIn("Python version", cs.out)
self.assertIn("Platform", cs.out)
self.assertIn("Using distributed or parallel set-up in script?", cs.out)
@is_pt_tf_cross_test
@patch(
"sys.argv", ["fakeprogrampath", "pt-to-tf", "--model-name", "hf-internal-testing/tiny-random-gptj", "--no-pr"]
)
def test_cli_pt_to_tf(self):
import transformers.commands.transformers_cli
shutil.rmtree("/tmp/hf-internal-testing/tiny-random-gptj", ignore_errors=True) # cleans potential past runs
transformers.commands.transformers_cli.main()
self.assertTrue(os.path.exists("/tmp/hf-internal-testing/tiny-random-gptj/tf_model.h5"))
@require_torch
@patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
def test_cli_download(self):
import transformers.commands.transformers_cli
# remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))
@require_torch
@patch(
"sys.argv",
[
"fakeprogrampath",
"download",
"hf-internal-testing/test_dynamic_model_with_tokenizer",
"--trust-remote-code",
"--cache-dir",
"/tmp",
],
)
def test_cli_download_trust_remote(self):
import transformers.commands.transformers_cli
# remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
self.assertTrue(
os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_convert_slow_tokenizer.py | import unittest
import warnings
from dataclasses import dataclass
from transformers.convert_slow_tokenizer import SpmConverter
from transformers.testing_utils import get_tests_dir
@dataclass
class FakeOriginalTokenizer:
vocab_file: str
class ConvertSlowTokenizerTest(unittest.TestCase):
def test_spm_converter_bytefallback_warning(self):
spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback)
with warnings.catch_warnings(record=True) as w:
_ = SpmConverter(original_tokenizer_without_bytefallback)
self.assertEqual(len(w), 0)
original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)
with warnings.catch_warnings(record=True) as w:
_ = SpmConverter(original_tokenizer_with_bytefallback)
self.assertEqual(len(w), 1)
self.assertIn(
"The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
" which is not implemented in the fast tokenizers.",
str(w[0].message),
)
| 0 |
hf_public_repos/transformers/tests | hf_public_repos/transformers/tests/utils/test_activations_tf.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers.activations_tf import get_tf_activation
@require_tf
class TestTFActivations(unittest.TestCase):
def test_gelu_10(self):
x = tf.constant([-100, -1.0, -0.1, 0, 0.1, 1.0, 100.0])
gelu = get_tf_activation("gelu")
gelu10 = get_tf_activation("gelu_10")
y_gelu = gelu(x)
y_gelu_10 = gelu10(x)
clipped_mask = tf.where(y_gelu_10 < 10.0, 1.0, 0.0)
self.assertEqual(tf.math.reduce_max(y_gelu_10).numpy().item(), 10.0)
self.assertTrue(np.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
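# In other words, gelu_10 appears to behave like min(gelu(x), 10.0): values below the clip
# threshold match the regular gelu (checked through clipped_mask above) and the output
# saturates at 10.0.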
def test_get_activation(self):
get_tf_activation("gelu")
get_tf_activation("gelu_10")
get_tf_activation("gelu_fast")
get_tf_activation("gelu_new")
get_tf_activation("glu")
get_tf_activation("mish")
get_tf_activation("quick_gelu")
get_tf_activation("relu")
get_tf_activation("sigmoid")
get_tf_activation("silu")
get_tf_activation("swish")
get_tf_activation("tanh")
with self.assertRaises(KeyError):
get_tf_activation("bogus")
with self.assertRaises(KeyError):
get_tf_activation(None)
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/nllb_moe/test_modeling_nllb_moe.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch NLLB-MoE model. """
import copy
import tempfile
import unittest
from transformers import NllbMoeConfig, is_torch_available, set_seed
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import NllbMoeForConditionalGeneration, NllbMoeModel, NllbTokenizer
from transformers.models.nllb_moe.modeling_nllb_moe import NllbMoeDecoder, NllbMoeEncoder, NllbMoeTop2Router
class NllbMoeModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
num_experts=4,
encoder_sparse_step=2,
decoder_sparse_step=1,
expert_capacity=100,
router_jitter_noise=0.0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.encoder_sparse_step = encoder_sparse_step
self.decoder_sparse_step = decoder_sparse_step
self.expert_capacity = expert_capacity
self.router_jitter_noise = router_jitter_noise
self.num_experts = num_experts
def prepare_nllb_moe_inputs_dict(
self,
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad tokens in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
input_ids = input_ids.clamp(self.pad_token_id + 1)
decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
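# e.g. with pad_token_id=1, an illustrative row [0, 1, 5, 7] is clamped to [2, 2, 5, 7],
# so no pad tokens remain inside the sequence (values here are only for illustration)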
config = self.get_config()
inputs_dict = self.prepare_nllb_moe_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return NllbMoeConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
encoder_layerdrop=self.encoder_layerdrop,
decoder_layerdrop=self.decoder_layerdrop,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
expert_capacity=self.expert_capacity,
router_jitter_noise=self.router_jitter_noise,
decoder_sparse_step=self.decoder_sparse_step,
encoder_sparse_step=self.encoder_sparse_step,
num_experts=self.num_experts,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = NllbMoeModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append the new tokens to input_ids and the new mask to attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = NllbMoeModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = NllbMoeEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = NllbMoeDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class NllbMoeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (NllbMoeModel, NllbMoeForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (NllbMoeForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"conversational": NllbMoeForConditionalGeneration,
"feature-extraction": NllbMoeModel,
"summarization": NllbMoeForConditionalGeneration,
"text2text-generation": NllbMoeForConditionalGeneration,
"translation": NllbMoeForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
fx_compatible = False
test_pruning = False
test_missing_keys = True
test_torchscript = False
# TODO: Fix the failed tests when this model gets more usage
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
# Saving the slow tokenizer after saving the fast tokenizer causes the loading of the latter to hang forever.
return True
def setUp(self):
self.model_tester = NllbMoeModelTester(self)
self.config_tester = ConfigTester(self, config_class=NllbMoeConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.decoder_sparse_step = 0
self.model_tester.create_and_check_decoder_model_past_large_inputs(config, inputs_dict)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (NllbMoeModel, NllbMoeForConditionalGeneration):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
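# replace input ids with pre-computed embeddings; encoder-decoder models need both encoder and decoder embeddings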
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
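# mask out positions equal to the pad token id (1)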
attention_mask = input_ids.ne(1).to(torch_device)
model = NllbMoeForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_get_loss(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
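# also return the per-layer router logits so they can be inspected below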
input_dict["output_router_logits"] = True
input_dict["labels"] = input_dict["input_ids"]
model = NllbMoeForConditionalGeneration(config).eval().to(torch_device)
out = model(**input_dict)
self.assertIsNotNone(out.loss)
self.assertIsNotNone(model(**input_dict)["encoder_router_logits"][1])
self.assertIsNotNone(model(**input_dict)["decoder_router_logits"][0])
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class NllbMoeModelIntegrationTests(unittest.TestCase):
@require_torch
@cached_property
def model_inputs(self):
return {
"input_ids": torch.LongTensor(
[
[28768, 248, 6399, 9, 65972, 452, 1925, 629, 123543, 248075, 2, 256047],
[117, 7027, 7195, 202, 44778, 248075, 2, 256047, 1, 1, 1, 1],
]
),
"attention_mask": torch.Tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]
),
"decoder_input_ids": torch.LongTensor([[2, 256057], [2, 256057]]),
}
@cached_property
def tokenizer(self):
return NllbTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
@cached_property
def big_model(self):
return NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
def inference_no_head(self):
model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts").eval()
with torch.no_grad():
output = model(**self.model_inputs)
# fmt: off
EXPECTED_ENCODER_STATE = torch.Tensor([ 0.3920, -0.1974, -0.0279, 0.3463, -0.8306, -1.0629, -0.4643, 2.0563, 1.1123, 0.3566, -0.9291, -0.3840, -0.2527, -0.9858, 1.5185, -1.1346, 0.0323, -0.9103, -0.3647, -0.4462, -0.9720, -0.3541, 0.1777, -0.4647, 1.6970, -0.9062, 0.2727, -1.0737, 0.8785, 0.4324])
EXPECTED_DECODER_STATE = torch.Tensor([-6.0425e-02, -2.0015e-01, 6.0575e-02, -8.6366e-01, -1.1310e+00, 6.8369e-01, 7.5615e-01, 7.3555e-01, 2.3071e-01, 1.5954e+00, -7.0728e-01, -2.2647e-01, -1.3292e+00, 4.8246e-01, -6.9153e-01, -1.8199e-02, -7.3664e-01, 1.5902e-03, 1.0760e-01, 1.0298e-01, -9.3933e-01, -4.6567e-01, 8.0417e-01, 1.5243e+00, 5.5844e-01, -9.9239e-02, 1.4885e+00, 7.1527e-02, -5.2612e-01, 9.4435e-02])
# fmt: on
torch.testing.assert_allclose(
output.encoder_last_hidden_state[1, 0, :30], EXPECTED_ENCODER_STATE, rtol=6e-3, atol=9e-3
)
torch.testing.assert_allclose(
output.last_hidden_state[1, 0, :30], EXPECTED_DECODER_STATE, rtol=6e-3, atol=9e-3
)
def test_inference_logits(self):
r"""
Logits testing to check implementation consistency between the `fairseq` implementation
and the `transformers` implementation of NLLB-MoE. We only check the logits
of the second sample of the batch, as it is padded.
"""
model = NllbMoeForConditionalGeneration.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts").eval()
with torch.no_grad():
output = model(**self.model_inputs)
EXPECTED_LOGITS = torch.Tensor([-0.3059, 0.0000, 9.3029, 0.6456, -0.9148, 1.7836, 0.6478, 0.9438, -0.5272, -0.6617, -1.2717, 0.4564, 0.1345, -0.2301, -1.0140, 1.1427, -1.5535, 0.1337, 0.2082, -0.8112, -0.3842, -0.3377, 0.1256, 0.6450, -0.0452, 0.0219, 1.4274, -0.4991, -0.2063, -0.4409,]) # fmt: skip
torch.testing.assert_allclose(output.logits[1, 0, :30], EXPECTED_LOGITS, rtol=6e-3, atol=9e-3)
@unittest.skip("This requires 300GB of RAM")
def test_large_logits(self):
model = self.big_model
with torch.no_grad():
output = model(**self.model_inputs)
# fmt: off
EXPECTED_ENCODER_STATE = torch.Tensor([ 0.1696, -0.0059, 0.0489, 0.0479, -0.4222, -0.2178, -0.1372, -0.0860, -0.4249, -0.0081, -0.1186, 0.6678, 0.0160, 0.4140, 0.1799, 0.0672, -0.4941, 0.0173, -0.0740, 0.0845, -0.2197, 0.4465, 0.2268, -0.1752, -0.0562, 0.1033, -0.0869, -0.5490, 0.0582, 0.2165])
EXPECTED_DECODER_STATE = torch.Tensor([ 0.0374, -0.1055, -0.1060, -0.1711, -0.0540, -0.1183, -0.0779, 0.0610, -0.0279, -0.0848, 0.0222, 0.0372, -0.0298, -0.0861, -0.0354, -0.0103, 0.0538, -0.0148, -0.0105, 0.0224, 0.0629, -0.0291, -0.0671, 0.0173, -0.0066, -0.0245, -0.0499, 0.0760, -0.0067, 0.0086])
EXPECTED_LOGITS = torch.Tensor([ 0.3834, 0.2057, 4.5399, 0.8301, 0.4810, 0.9325, 0.9928, 0.9574, 0.5517, 0.9156, 0.2698, 0.6728, 0.7121, 0.3080, 0.4693, 0.5756, 1.0407, 0.2219, 0.3714, 0.5699, 0.5547, 0.8472, 0.3178, 0.1286, 0.1791, 0.9391, 0.5153, -0.2146, 0.1689, 0.6816])
# fmt: on
torch.testing.assert_allclose(
output.encoder_last_hidden_state[1, 0, :30], EXPECTED_ENCODER_STATE, rtol=6e-3, atol=9e-3
)
torch.testing.assert_allclose(
output.last_hidden_state[1, 0, :30], EXPECTED_DECODER_STATE, rtol=6e-3, atol=9e-3
)
torch.testing.assert_allclose(output.logits[1, 0, :30], EXPECTED_LOGITS, rtol=6e-3, atol=9e-3)
@unittest.skip("This requires 300GB of RAM")
def test_seq_to_seq_generation(self):
model = self.big_model
tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-moe-54b")
# first 6 samples of load_dataset("facebook/flores", "eng_Latn-fra_Latn"), devtest. Ground truths are very similar to the fairseq translation files
FIRST_6_FLORES_200 = [
'We now have 4-month-old mice that are non-diabetic that used to be diabetic," he added.',
"Dr. Ehud Ur, professor of medicine at Dalhousie University in Halifax, Nova Scotia and chair of the clinical and scientific division of the Canadian Diabetes Association cautioned that the research is still in its early days.",
"Like some other experts, he is skeptical about whether diabetes can be cured, noting that these findings have no relevance to people who already have Type 1 diabetes.",
"On Monday, Sara Danius, permanent secretary of the Nobel Committee for Literature at the Swedish Academy, publicly announced during a radio program on Sveriges Radio in Sweden the committee, unable to reach Bob Dylan directly about winning the 2016 Nobel Prize in Literature, had abandoned its efforts to reach him.",
'Danius said, "Right now we are doing nothing. I have called and sent emails to his closest collaborator and received very friendly replies. For now, that is certainly enough."',
"Previously, Ring's CEO, Jamie Siminoff, remarked the company started when his doorbell wasn't audible from his shop in his garage.",
]
inputs = tokenizer(FIRST_6_FLORES_200, padding=True, return_tensors="pt").to(torch_device)
batch_translation = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])
EXPECTED_FAIRSEQ_TRANSLATION = [
'"Nous avons maintenant des souris de 4 mois non diabétiques qui étaient diabétiques", a-t-il ajouté.',
"Le docteur Ehud Ur, professeur de médecine à l'université Dalhousie, à Halifax, en Nouvelle-Écosse, et président de la division clinique et scientifique de l'Association canadienne du diabète, prévient que la recherche n'en est qu'à ses débuts.",
"Comme d'autres spécialistes, il est sceptique quant à la guérison du diabète.",
"Lundi, Sara Danius, secrétaire permanente du Comité Nobel de littérature à l'Académie suédoise, a annoncé publiquement lors d'une émission de radio sur Sveriges Radio en Suède que le comité, incapable de joindre Bob Dylan directement pour lui annoncer le prix Nobel de littérature 2016, avait abandonné ses efforts pour le joindre.",
"Danius a déclaré: \"Pour l'instant, nous ne faisons rien. J'ai appelé et envoyé des courriels à son plus proche collaborateur et j'ai reçu des réponses très amicales. Pour l'instant, c'est certainement suffisant\".",
"Auparavant, le PDG de Ring, Jamie Siminoff, a fait remarquer que la société avait commencé lorsque sa sonnette n'était pas audible depuis son magasin dans son garage.",
]
translation = tokenizer.batch_decode(
batch_translation.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
)
assert translation == EXPECTED_FAIRSEQ_TRANSLATION
@require_torch
class NllbMoeRouterTest(unittest.TestCase):
r"""
NLLB-MoE, like Switch Transformers, has different blocks from classic transformer based models.
The sparse MLP contains a Router class that has to be tested to check if it is correctly implemented.
Original implementation of the routers here:
"""
config = NllbMoeConfig(
num_experts=4,
hidden_size=32,
d_ff=16,
expert_capacity=4,
)
batch_size = 2
sequence_length = 20
def test_top_2_routing(self):
# test routing with minimal reproduction
mask = torch.ones((self.batch_size, self.sequence_length), dtype=torch.bool)
mask[0][0] = False
mask[1][0] = False
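# flatten the per-token padding mask so it lines up with the (batch * seq_len, num_experts) router logits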
mask = mask.reshape(-1)
set_seed(0)
hidden_states = torch.rand((self.batch_size, self.sequence_length, self.config.hidden_size))
classifier = torch.nn.Linear(self.config.hidden_size, self.config.num_experts)
hf_router = NllbMoeTop2Router(self.config)
_, _, hidden_dim = hidden_states.shape
logits = classifier(hidden_states.reshape((self.batch_size * self.sequence_length), hidden_dim))
top_1_mask, router_probs = hf_router.route_tokens(logits, padding_mask=mask)
torch.argmax(top_1_mask, dim=-1)
router_mask = router_probs.bool()
set_seed(0)
experts = [
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
]
hidden_states = hidden_states.reshape((self.batch_size * self.sequence_length), hidden_dim)
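# dispatch: einsum("bm,be->ebm") zeroes the hidden states of tokens not routed to a given expert,
# yielding one (tokens, hidden) slice per expert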
masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
for idx, expert in enumerate(experts):
token_indices = router_mask[:, idx]
combining_weights = router_probs[token_indices, idx]
expert_output = expert(masked_hidden_states[idx, token_indices])
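# scale by (1 - moe_token_dropout), mirroring the eval-time handling of token dropout (scaled rather than applied stochastically)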
expert_output *= 1 - self.config.moe_token_dropout
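# combine: weight each routed token's expert output by its router probability before summing over experts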
masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output)
hidden_states = masked_hidden_states.sum(dim=0).reshape(self.batch_size, self.sequence_length, hidden_dim)
EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES = torch.Tensor([[ 7.0340e-04, 2.7997e-03, -1.3351e-02, -7.6705e-03, -3.5089e-03,3.9773e-03, 7.4593e-03, 1.2566e-02, 3.5860e-03, -2.7448e-02,-1.3731e-02, -1.0534e-02, -1.3606e-02, -1.5048e-02, -2.8914e-03,-5.0371e-03, -1.3963e-03, 6.0076e-03, -1.1380e-02, -1.4620e-02, 5.2401e-03, 8.4660e-04, -1.5319e-03, -1.6735e-02, 1.1302e-02, 3.6119e-03, 4.6084e-03, -1.3458e-02, 7.7792e-05, 1.4312e-02, 4.9107e-03, -5.0936e-03], [-4.4538e-03, 3.1026e-03, 1.4121e-04, -4.8121e-03, -5.6279e-03, 7.2493e-03, 3.9769e-03, 1.1114e-02, -1.5666e-03, -2.3477e-02, 8.7268e-03, 1.3446e-02, -2.8845e-05, -1.7287e-02, 8.7619e-03, -4.5316e-03, -1.2164e-02, 5.7461e-03, -4.5861e-03, -9.3907e-03, 2.9808e-02, 8.9206e-04, -7.6232e-04, -1.4173e-02, 3.0208e-03, 1.5310e-02, 9.7717e-03, 3.1014e-03, 7.8042e-03, 8.0197e-03, 3.4784e-03, -7.1728e-03]]) # fmt: skip
self.assertTrue(torch.allclose(hidden_states.mean(1), EXPECTED_MEAN_FAIRSEQ_HIDDEN_STATES, 1e-4))
def test_batch_prioritized_routing(self):
set_seed(0)
config = NllbMoeConfig(
num_experts=4, hidden_size=32, d_ff=16, expert_capacity=4, second_expert_policy="random"
)
mask = torch.zeros((self.batch_size * self.sequence_length), dtype=torch.bool)
logits = torch.rand((self.batch_size * self.sequence_length, 4))
config.batch_prioritized_routing = True
router = NllbMoeTop2Router(config)
top_1_mask, _ = router.route_tokens(logits, padding_mask=mask)
# check that the routing is batch-prioritized: one of the last tokens is still routed even though expert capacity is very small,
# which means it had a greater probability of being routed
assert top_1_mask[-1, 0] == 1
def test_second_expert_policy(self):
config = NllbMoeConfig(
num_experts=4,
hidden_size=32,
d_ff=16,
expert_capacity=40,
)
set_seed(0)
mask = torch.zeros((self.batch_size * self.sequence_length), dtype=torch.bool)
logits = torch.rand((self.batch_size * self.sequence_length, 4))
set_seed(0)
config.second_expert_policy = "random"
router = NllbMoeTop2Router(config)
top_1_mask, router_probs = router.route_tokens(logits, padding_mask=mask)
set_seed(0)
config.second_expert_policy = "sampling"
router = NllbMoeTop2Router(config)
top_1_mask_sp, router_probs_sp = router.route_tokens(logits, padding_mask=mask)
set_seed(0)
config.second_expert_policy = "all"
router = NllbMoeTop2Router(config)
top_1_mask_all, router_probs_all = router.route_tokens(logits, padding_mask=mask)
# fmt: off
EXPECTED_ROUTER_ALL = torch.tensor([[0.3902, 0.0000, 0.0000, 0.6098], [0.0000, 0.0000, 0.7770, 0.2230], [0.0000, 0.0000, 0.2726, 0.7274], [0.4221, 0.0000, 0.5779, 0.0000], [0.0000, 0.0000, 0.7810, 0.2190], [0.5518, 0.4482, 0.0000, 0.0000], [0.0000, 0.4060, 0.5940, 0.0000], [0.7340, 0.0000, 0.0000, 0.2660], [0.4778, 0.5222, 0.0000, 0.0000], [0.0000, 0.3984, 0.0000, 0.6016], [0.0000, 0.0548, 0.9452, 0.0000], [0.6796, 0.0000, 0.0000, 0.3204], [0.0700, 0.0000, 0.9300, 0.0000], [0.1854, 0.0000, 0.8146, 0.0000], [0.6775, 0.3225, 0.0000, 0.0000], [0.0000, 0.0000, 0.5027, 0.4973], [0.0000, 0.6577, 0.0000, 0.3423], [0.0000, 0.7767, 0.0000, 0.2233], [0.1944, 0.8056, 0.0000, 0.0000], [0.0000, 0.3073, 0.0000, 0.6927], [0.0000, 0.5655, 0.4345, 0.0000], [0.5791, 0.0000, 0.0000, 0.4209], [0.0440, 0.0000, 0.9560, 0.0000], [0.0083, 0.9917, 0.0000, 0.0000], [0.0000, 0.8395, 0.0000, 0.1605], [0.0000, 0.1458, 0.0000, 0.8542], [0.0000, 0.8534, 0.1466, 0.0000], [0.4938, 0.0000, 0.0000, 0.5062], [0.1329, 0.8671, 0.0000, 0.0000], [0.3058, 0.0000, 0.6942, 0.0000], [0.4458, 0.0000, 0.0000, 0.5542], [0.9053, 0.0947, 0.0000, 0.0000], [0.0000, 0.7563, 0.2437, 0.0000], [0.0000, 0.0000, 0.4096, 0.5904], [0.4551, 0.0000, 0.0000, 0.5449], [0.8502, 0.1498, 0.0000, 0.0000], [0.0000, 0.6312, 0.3688, 0.0000], [0.8920, 0.0000, 0.0000, 0.1080], [0.1913, 0.0000, 0.0000, 0.8087], [0.2491, 0.7509, 0.0000, 0.0000]])
EXPECTED_ROUTER_SP = torch.tensor([[0.0000, 0.6539, 0.0000, 0.3461], [0.0000, 0.0000, 0.3998, 0.6002], [0.0000, 0.5574, 0.0000, 0.4426], [0.0000, 0.0000, 0.4441, 0.5559], [0.0000, 0.6545, 0.3455, 0.0000], [0.4419, 0.5581, 0.0000, 0.0000], [0.0000, 0.4014, 0.5986, 0.0000], [0.3215, 0.0000, 0.0000, 0.6785], [0.4765, 0.5235, 0.0000, 0.0000], [0.0000, 0.5467, 0.0000, 0.4533], [0.0000, 0.4156, 0.5844, 0.0000], [0.3370, 0.0000, 0.6630, 0.0000], [0.0000, 0.0000, 0.4558, 0.5442], [0.4659, 0.0000, 0.5341, 0.0000], [0.6179, 0.3821, 0.0000, 0.0000], [0.6277, 0.0000, 0.3723, 0.0000], [0.5836, 0.4164, 0.0000, 0.0000], [0.0000, 0.6600, 0.0000, 0.3400], [0.0000, 0.4933, 0.0000, 0.5067], [0.6016, 0.0000, 0.0000, 0.3984], [0.0000, 0.5160, 0.4840, 0.0000], [0.5799, 0.0000, 0.0000, 0.4201], [0.0000, 0.0000, 0.4826, 0.5174], [0.5426, 0.4574, 0.0000, 0.0000], [0.5362, 0.4638, 0.0000, 0.0000], [0.6448, 0.0000, 0.0000, 0.3552], [0.0000, 0.5909, 0.4091, 0.0000], [0.4196, 0.0000, 0.0000, 0.5804], [0.3191, 0.6809, 0.0000, 0.0000], [0.0000, 0.0000, 0.4886, 0.5114], [0.4899, 0.0000, 0.0000, 0.5101], [0.4123, 0.0000, 0.5877, 0.0000], [0.0000, 0.3736, 0.0000, 0.6264], [0.0000, 0.0000, 0.6009, 0.3991], [0.4246, 0.0000, 0.0000, 0.5754], [0.4997, 0.0000, 0.5003, 0.0000], [0.0000, 0.3595, 0.6405, 0.0000], [0.5433, 0.0000, 0.0000, 0.4567], [0.0000, 0.6806, 0.0000, 0.3194], [0.6689, 0.3311, 0.0000, 0.0000]])
EXPECTED_ROUTER = torch.tensor([[0.4324, 0.5676, 0.0000, 0.0000], [0.0000, 0.4348, 0.0000, 0.5652], [0.4559, 0.5441, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.4744, 0.5256, 0.0000, 0.0000], [0.0000, 0.5103, 0.0000, 0.4897], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.5467, 0.0000, 0.4533], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 1.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.0000, 0.0000, 1.0000, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000], [0.5063, 0.4937, 0.0000, 0.0000], [0.5396, 0.0000, 0.0000, 0.4604], [0.4576, 0.5424, 0.0000, 0.0000], [0.0000, 0.0000, 0.0000, 1.0000], [0.5134, 0.0000, 0.4866, 0.0000], [0.0000, 0.5160, 0.4840, 0.0000], [0.5439, 0.0000, 0.4561, 0.0000], [0.4849, 0.0000, 0.0000, 0.5151], [0.5426, 0.4574, 0.0000, 0.0000], [0.5362, 0.4638, 0.0000, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.4448, 0.0000, 0.5552], [0.0000, 1.0000, 0.0000, 0.0000], [0.0000, 0.0000, 0.4886, 0.5114], [0.4899, 0.0000, 0.0000, 0.5101], [0.0000, 0.0000, 0.5296, 0.4704], [0.0000, 0.0000, 0.4469, 0.5531], [0.0000, 0.4053, 0.5947, 0.0000], [0.0000, 0.0000, 0.4460, 0.5540], [0.4997, 0.0000, 0.5003, 0.0000], [0.0000, 0.0000, 0.5851, 0.4149], [1.0000, 0.0000, 0.0000, 0.0000], [0.0000, 0.5010, 0.4990, 0.0000], [1.0000, 0.0000, 0.0000, 0.0000]])
EXPECTED_TOP_1_ALL = torch.LongTensor([[0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]])
EXPECTED_TOP_1_SP = torch.LongTensor([[0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]])
# `sampling` and `random` do not affect the mask of the top_1 router
# fmt: on
torch.testing.assert_allclose(router_probs_all, EXPECTED_ROUTER_ALL, 1e-4, 1e-4)
torch.testing.assert_allclose(router_probs_sp, EXPECTED_ROUTER_SP, 1e-4, 1e-4)
torch.testing.assert_allclose(router_probs, EXPECTED_ROUTER, 1e-4, 1e-4)
torch.testing.assert_allclose(top_1_mask_all, EXPECTED_TOP_1_ALL, 1e-4, 1e-4)
torch.testing.assert_allclose(top_1_mask_sp, EXPECTED_TOP_1_SP, 1e-4, 1e-4)
torch.testing.assert_allclose(top_1_mask, EXPECTED_TOP_1_SP, 1e-4, 1e-4)
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/sew_d/test_modeling_sew_d.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Hubert model. """
import math
import unittest
import pytest
from transformers import SEWDConfig, is_torch_available
from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SEWDForCTC,
SEWDForSequenceClassification,
SEWDModel,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.models.hubert.modeling_hubert import _compute_mask_indices
class SEWDModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=32,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(64, 32, 32),
conv_stride=(5, 2, 1),
conv_kernel=(10, 3, 1),
conv_bias=False,
num_conv_pos_embeddings=31,
num_conv_pos_embedding_groups=2,
squeeze_factor=2,
max_position_embeddings=512,
position_buckets=256,
share_att_key=True,
relative_attention=True,
position_biased_input=False,
pos_att_type=("p2c", "c2p"),
norm_rel_ebd="layer_norm",
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout=0.1,
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.squeeze_factor = squeeze_factor
self.max_position_embeddings = max_position_embeddings
self.position_buckets = position_buckets
self.share_att_key = share_att_key
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.norm_rel_ebd = norm_rel_ebd
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
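# each conv layer shortens the sequence: L_out = (L_in - (kernel - 1)) / stride;
# the transformer encoder then sees that length further divided by `squeeze_factor`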
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length // self.squeeze_factor
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return SEWDConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
squeeze_factor=self.squeeze_factor,
max_position_embeddings=self.max_position_embeddings,
position_buckets=self.position_buckets,
share_att_key=self.share_att_key,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
pos_att_type=self.pos_att_type,
norm_rel_ebd=self.norm_rel_ebd,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout=self.hidden_dropout,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = SEWDModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = SEWDModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
def check_ctc_loss(self, config, input_values, *args):
model = SEWDForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWDForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_loss(self, config, input_values, *args):
model = SEWDForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWDForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = SEWDForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class SEWDModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (SEWDForCTC, SEWDModel, SEWDForSequenceClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{
"audio-classification": SEWDForSequenceClassification,
"automatic-speech-recognition": SEWDForCTC,
"feature-extraction": SEWDModel,
}
if is_torch_available()
else {}
)
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = SEWDModelTester(self)
self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# SEW-D has no inputs_embeds
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
def test_forward_signature(self):
pass
# SEW-D cannot resize token embeddings
# since it has no token embeddings
def test_resize_tokens_embeddings(self):
pass
# SEW-D has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
def test_model_common_attributes(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = [
"conv.weight",
"masked_spec_embed",
"quantizer.weight_proj.weight",
]
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k")
self.assertIsNotNone(model)
@require_torch
class SEWDUtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
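# with mask_length == 1 the masked spans cannot overlap, so exactly mask_prob * sequence_length indices are masked per example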
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
mask = torch.from_numpy(mask).to(torch_device)
# because of overlap, the mask sums don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
@require_torch
@require_soundfile
@slow
class SEWDModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_pretrained_batched(self):
model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k").to(torch_device)
processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-d-tiny-100k")
input_speech = self._load_datasamples(2)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
with torch.no_grad():
outputs = model(input_values).last_hidden_state
# expected outputs taken from the original SEW-D implementation
expected_outputs_first = torch.tensor(
[
[
[-0.1619, 0.6995, 0.4062, -0.1014],
[-0.1364, 0.5960, 0.0952, -0.0873],
[-0.1572, 0.5718, 0.4228, -0.0864],
[-0.1325, 0.6823, 0.1387, -0.0871],
],
[
[-0.1296, 0.4008, 0.4952, -0.1450],
[-0.1152, 0.3693, 0.3037, -0.1290],
[-0.1194, 0.6074, 0.3531, -0.1466],
[-0.1113, 0.3135, 0.2224, -0.1338],
],
],
device=torch_device,
)
expected_outputs_last = torch.tensor(
[
[
[-0.1577, 0.5108, 0.8553, 0.2550],
[-0.1530, 0.3580, 0.6143, 0.2672],
[-0.1535, 0.4954, 0.8503, 0.1387],
[-0.1572, 0.3363, 0.6217, 0.1490],
],
[
[-0.1338, 0.5459, 0.9607, -0.1133],
[-0.1502, 0.3738, 0.7313, -0.0986],
[-0.0953, 0.4708, 1.0821, -0.0944],
[-0.1474, 0.3598, 0.7248, -0.0748],
],
],
device=torch_device,
)
expected_output_sum = 54201.0469
self.assertTrue(torch.allclose(outputs[:, :4, :4], expected_outputs_first, atol=1e-3))
self.assertTrue(torch.allclose(outputs[:, -4:, -4:], expected_outputs_last, atol=1e-3))
self.assertTrue(abs(outputs.sum() - expected_output_sum) < 1)
def test_inference_ctc_batched(self):
model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h").to(torch_device)
processor = Wav2Vec2Processor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h", do_lower_case=True)
input_speech = self._load_datasamples(2)
inputs = processor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
with torch.no_grad():
logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
predicted_trans = processor.batch_decode(predicted_ids)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"swet covered breon's body trickling into the titlowing closs that was the only garmened he war",
]
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/table_transformer/test_modeling_table_transformer.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Table Transformer model. """
import inspect
import math
import unittest
from huggingface_hub import hf_hub_download
from transformers import ResNetConfig, TableTransformerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import TableTransformerForObjectDetection, TableTransformerModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TableTransformerModelTester:
def __init__(
self,
parent,
batch_size=8,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=12,
num_channels=3,
min_size=200,
max_size=200,
n_targets=8,
num_labels=3,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.n_targets = n_targets
self.num_labels = num_labels
# we also set the expected seq length for both encoder and decoder
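# the backbone downsamples the image by a factor of 32, so the encoder sequence is (H / 32) * (W / 32) feature positions,
# while the decoder sequence is the number of object queries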
self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32)
self.decoder_seq_length = self.num_queries
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size])
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, labels
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=10,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 2, 1],
hidden_act="relu",
num_labels=3,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
return TableTransformerConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
use_timm_backbone=False,
backbone_config=resnet_config,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def create_and_check_table_transformer_model(self, config, pixel_values, pixel_mask, labels):
model = TableTransformerModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)
)
def create_and_check_table_transformer_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = TableTransformerForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
def create_and_check_table_transformer_no_timm_backbone(self, config, pixel_values, pixel_mask, labels):
config.use_timm_backbone = False
config.backbone_config = ResNetConfig()
model = TableTransformerForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TableTransformerModel,
TableTransformerForObjectDetection,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_torchscript = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ in ["TableTransformerForObjectDetection"]:
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.min_size,
self.model_tester.max_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = TableTransformerModelTester(self)
self.config_tester = ConfigTester(self, config_class=TableTransformerConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_table_transformer_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_model(*config_and_inputs)
def test_table_transformer_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_object_detection_head_model(*config_and_inputs)
def test_table_transformer_no_timm_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_no_timm_backbone(*config_and_inputs)
@unittest.skip(reason="Table Transformer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Table Transformer does not have a get_input_embeddings method")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="Table Transformer is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="Table Transformer does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@slow
def test_model_outputs_equivalence(self):
# TODO Niels: fix me!
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = self.model_tester.decoder_seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
decoder_key_length = self.model_tester.decoder_seq_length
encoder_key_length = self.model_tester.encoder_seq_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "TableTransformerForObjectDetection":
correct_outlen += 2
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_retain_grad_hidden_states_attentions(self):
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = ["pixel_values", "pixel_mask"]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else []
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["pixel_values", "pixel_mask"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "TableTransformerForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
self.assertTrue(outputs)
def test_greyscale_images(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# use greyscale pixel values
inputs_dict["pixel_values"] = floats_tensor(
[self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size]
)
# let's set num_channels to 1
config.num_channels = 1
config.backbone_config.num_channels = 1
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertTrue(outputs)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
configs_no_init.init_xavier_std = 1e9
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "bbox_attention" in name and "bias" not in name:
self.assertLess(
100000,
abs(param.data.max().item()),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_timm
@require_vision
@slow
class TableTransformerModelIntegrationTests(unittest.TestCase):
def test_table_detection(self):
image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
model.to(torch_device)
file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
image = Image.open(file_path).convert("RGB")
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
expected_shape = (1, 15, 3)
self.assertEqual(outputs.logits.shape, expected_shape)
expected_logits = torch.tensor(
[[-6.7329, -16.9590, 6.7447], [-8.0038, -22.3071, 6.9288], [-7.2445, -20.9855, 7.3465]],
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4))
expected_boxes = torch.tensor(
[[0.4868, 0.1764, 0.6729], [0.6674, 0.4621, 0.3864], [0.4720, 0.1757, 0.6362]], device=torch_device
)
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-3))
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/bark/test_processor_bark.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
def setUp(self):
self.checkpoint = "suno/bark-small"
self.tmpdirname = tempfile.mkdtemp()
self.voice_preset = "en_speaker_1"
self.input_string = "This is a test string"
self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
self.speaker_embeddings_directory = "speaker_embeddings"
def get_tokenizer(self, **kwargs):
return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
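# The test below checks that custom tokenizer kwargs and a custom speaker embeddings path
# survive a save_pretrained / from_pretrained round trip.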
@slow
def test_save_load_pretrained_additional_features(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
)
processor.save_pretrained(
self.tmpdirname,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
speaker_embeddings_directory=self.speaker_embeddings_directory,
)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
processor = BarkProcessor.from_pretrained(
self.tmpdirname,
self.speaker_embeddings_dict_path,
bos_token="(BOS)",
eos_token="(EOS)",
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
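# A Bark voice preset is a dict of semantic, coarse and fine prompt arrays; the test below feeds
# it directly, from a local .npz file, and from the Hub.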
def test_speaker_embeddings(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
)
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
"semantic_prompt": np.ones(seq_len),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string, voice_preset=voice_preset)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from npz file
tmpfilename = os.path.join(self.tmpdirname, "file.npz")
np.savez(tmpfilename, **voice_preset)
inputs = processor(text=self.input_string, voice_preset=tmpfilename)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from the hub
inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
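# The processor should tokenize exactly like the underlying tokenizer, padded to Bark's fixed
# context length of 256 tokens.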
def test_tokenizer(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
encoded_processor = processor(text=self.input_string)
encoded_tok = tokenizer(
self.input_string,
padding="max_length",
max_length=256,
add_special_tokens=False,
return_attention_mask=True,
return_token_type_ids=False,
)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/bark/test_modeling_bark.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Bark model. """
import copy
import inspect
import tempfile
import unittest
import pytest
from transformers import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
is_torch_available,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_fp16,
require_torch_gpu,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ..encodec.test_modeling_encodec import EncodecModelTester
if is_torch_available():
import torch
from transformers import (
BarkCausalModel,
BarkCoarseModel,
BarkFineModel,
BarkModel,
BarkProcessor,
BarkSemanticModel,
)
class BarkSemanticModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkSemanticConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
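# The check below runs a forward pass with use_cache=True, then feeds only the new tokens together
# with the cached past_key_values and verifies that a random slice of the logits matches an
# uncached forward pass over the full sequence, both with and without an attention mask.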
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkSemanticModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test that the model also works without an attention_mask
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkCoarseModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkCoarseConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkCoarseModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test that the model also works without an attention_mask
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkFineModelTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
# random integer in [self.n_codes_given, self.n_codes_total - 1]
codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given
inputs_dict = {
"codebook_idx": codebook_idx,
"input_ids": input_ids,
"head_mask": head_mask,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkFineConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkFineModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test that the model also works without an attention_mask
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
class BarkModelTester:
def __init__(
self,
parent,
semantic_kwargs=None,
coarse_acoustics_kwargs=None,
fine_acoustics_kwargs=None,
codec_kwargs=None,
is_training=False, # for now training is not supported
):
if semantic_kwargs is None:
semantic_kwargs = {}
if coarse_acoustics_kwargs is None:
coarse_acoustics_kwargs = {}
if fine_acoustics_kwargs is None:
fine_acoustics_kwargs = {}
if codec_kwargs is None:
codec_kwargs = {}
self.parent = parent
self.semantic_model_tester = BarkSemanticModelTester(parent, **semantic_kwargs)
self.coarse_acoustics_model_tester = BarkCoarseModelTester(parent, **coarse_acoustics_kwargs)
self.fine_acoustics_model_tester = BarkFineModelTester(parent, **fine_acoustics_kwargs)
self.codec_model_tester = EncodecModelTester(parent, **codec_kwargs)
self.is_training = is_training
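# BarkConfig is assembled from the semantic, coarse and fine sub-model configs plus the Encodec
# codec config via BarkConfig.from_sub_model_configs.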
def get_config(self):
return BarkConfig.from_sub_model_configs(
self.semantic_model_tester.get_config(),
self.coarse_acoustics_model_tester.get_config(),
self.fine_acoustics_model_tester.get_config(),
self.codec_model_tester.get_config(),
)
def get_pipeline_config(self):
config = self.get_config()
# follow the `get_pipeline_config` of the sub-component models
config.semantic_config.vocab_size = 300
config.coarse_acoustics_config.vocab_size = 300
config.fine_acoustics_config.vocab_size = 300
config.semantic_config.output_vocab_size = 300
config.coarse_acoustics_config.output_vocab_size = 300
config.fine_acoustics_config.output_vocab_size = 300
return config
@require_torch
class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BarkSemanticModel,) if is_torch_available() else ()
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_model_parallel = False
# no model_parallel for now
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkSemanticModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()
inputs["input_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = self.all_generative_model_classes[0](config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@require_torch
class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
# Same tester as BarkSemanticModelTest, except for model_class and config_class
all_model_classes = (BarkCoarseModel,) if is_torch_available() else ()
all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_model_parallel = False
# no model_parallel for now
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkCoarseModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()
inputs["input_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = self.all_generative_model_classes[0](config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@require_torch
class BarkFineModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (BarkFineModel,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = False
test_missing_keys = False
test_pruning = False
# no model_parallel for now
test_model_parallel = False
# torchscript disabled for now because forward takes an int (codebook_idx) as input
test_torchscript = False
test_resize_embeddings = True
def setUp(self):
self.model_tester = BarkFineModelTester(self)
self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]]
inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]])
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
# take first codebook channel
model = self.all_model_classes[0](config).eval().to(torch_device)
model.half()
# toy generation_configs
semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0)
coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given)
fine_generation_config = BarkFineGenerationConfig(
max_fine_history_length=config.block_size // 2,
max_fine_input_length=config.block_size,
n_fine_codebooks=config.n_codes_total,
)
codebook_size = config.vocab_size - 1
model.generate(
input_ids,
history_prompt=None,
temperature=None,
semantic_generation_config=semantic_generation_config,
coarse_generation_config=coarse_generation_config,
fine_generation_config=fine_generation_config,
codebook_size=codebook_size,
)
model.generate(
input_ids,
history_prompt=None,
temperature=0.7,
semantic_generation_config=semantic_generation_config,
coarse_generation_config=coarse_generation_config,
fine_generation_config=fine_generation_config,
codebook_size=codebook_size,
)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["codebook_idx", "input_ids"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_model_common_attributes(self):
# one embedding layer per codebook
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding))
model.set_input_embeddings(
torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)])
)
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear))
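# BarkFineModel keeps one embedding matrix per codebook in a ModuleList, so the resize tests below
# iterate over every codebook embedding instead of a single matrix.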
def test_resize_tokens_embeddings(self):
# resizing tokens_embeddings of a ModuleList
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
# Retrieve the embeddings and clone them
model_embed_list = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings_list = [model_embed.weight.clone() for model_embed in model_embed_list]
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed_list = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix for each codebook
for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed_list = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list):
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
# only check for the first embedding matrix
models_equal = True
for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_resize_embeddings_untied(self):
# resizing tokens_embeddings of a ModuleList
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds_list = model.get_output_embeddings()
for output_embeds in output_embeds_list:
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds_list = model.get_output_embeddings()
for output_embeds in output_embeds_list:
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
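# The two flash-attention tests reload the same checkpoint in bfloat16 with
# attn_implementation="flash_attention_2" and compare the last hidden states of the eager and
# flash-attention paths within a loose tolerance, ignoring the padded position.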
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn_2:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16)
model.to(torch_device)
dummy_input = inputs_dict["input_ids"][:1]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
dummy_attention_mask = dummy_attention_mask[:1]
dummy_attention_mask[:, 1:] = 1
dummy_attention_mask[:, :1] = 0
outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True)
outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True)
logits = outputs.hidden_states[-1]
logits_fa = outputs_fa.hidden_states[-1]
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
other_inputs = {"output_hidden_states": True}
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs)
outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs)
logits = outputs.hidden_states[-1]
logits_fa = outputs_fa.hidden_states[-1]
assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)
# check with inference + dropout
model.train()
_ = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_padding_right(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn_2:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.bfloat16,
)
model.to(torch_device)
dummy_input = inputs_dict["input_ids"][:1]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
dummy_attention_mask = dummy_attention_mask[:1]
dummy_attention_mask[:, :-1] = 1
dummy_attention_mask[:, -1:] = 0
outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True)
outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True)
logits = outputs.hidden_states[-1]
logits_fa = outputs_fa.hidden_states[-1]
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
other_inputs = {
"output_hidden_states": True,
}
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs)
outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs)
logits = outputs.hidden_states[-1]
logits_fa = outputs_fa.hidden_states[-1]
assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)
@require_torch
class BarkModelIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return BarkModel.from_pretrained("suno/bark").to(torch_device)
@cached_property
def processor(self):
return BarkProcessor.from_pretrained("suno/bark")
@cached_property
def inputs(self):
input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
input_ids = input_ids.to(torch_device)
return input_ids
@cached_property
def semantic_generation_config(self):
semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config)
return semantic_generation_config
@cached_property
def coarse_generation_config(self):
coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config)
return coarse_generation_config
@cached_property
def fine_generation_config(self):
fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config)
return fine_generation_config
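# The integration tests below exercise Bark's generation pipeline stage by stage:
# text tokens -> semantic tokens -> coarse acoustic codes -> fine acoustic codes -> waveform (decoded by the Encodec codec).
# A minimal end-to-end sketch of the same flow, reusing the fixtures above, would be:
#   inputs = self.processor("Hello world", voice_preset="en_speaker_6").to(torch_device)
#   audio = self.model.generate(**inputs, do_sample=False)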
@slow
def test_generate_semantic(self):
input_ids = self.inputs
# check first ids
expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip
# greedy decoding
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
@slow
def test_generate_semantic_early_stop(self):
input_ids = self.inputs
min_eos_p = 0.01
# check first ids
expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip
# Should be able to read min_eos_p from kwargs
with torch.no_grad():
torch.manual_seed(0)
output_ids_without_min_eos_p = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
)
torch.manual_seed(0)
output_ids_kwargs = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
min_eos_p=min_eos_p,
)
self.assertListEqual(output_ids_without_min_eos_p[0, : len(expected_output_ids)].tolist(), expected_output_ids)
self.assertLess(len(output_ids_kwargs[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()))
# Should be able to read min_eos_p from the semantic generation config
self.semantic_generation_config.min_eos_p = min_eos_p
with torch.no_grad():
torch.manual_seed(0)
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
)
self.assertEqual(output_ids.shape, output_ids_kwargs.shape)
self.assertLess(len(output_ids[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()))
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
@slow
def test_generate_coarse(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# check first ids
expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: skip
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
@slow
def test_generate_fine(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# fmt: off
expected_output_ids = [
[1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,],
[367, 394, 596, 342, 504, 492, 27, 27, 822, 822,],
[961, 955, 221, 955, 955, 686, 939, 939, 479, 176,],
[638, 365, 218, 944, 853, 363, 639, 22, 884, 456,],
[302, 912, 524, 38, 174, 209, 879, 23, 910, 227,],
[440, 673, 861, 666, 372, 558, 49, 172, 232, 342,],
[244, 358, 123, 356, 586, 520, 499, 877, 542, 637,],
[806, 685, 905, 848, 803, 810, 921, 208, 625, 203,],
]
# fmt: on
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
# greedy decoding
output_ids = self.model.fine_acoustics.generate(
output_ids,
history_prompt=history_prompt,
temperature=None,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
fine_generation_config=self.fine_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids)
@slow
def test_generate_end_to_end(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids)
self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"})
@slow
def test_generate_end_to_end_with_args(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6)
self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4)
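# Batched generation (with return_output_lengths=True) should match per-sample generation up to the
# small numerical differences introduced by the fine acoustics model.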
@slow
def test_generate_batching(self):
args = {"do_sample": False, "temperature": None}
s1 = "I love HuggingFace"
s2 = "In the light of the moon, a little egg lay on a leaf"
voice_preset = "en_speaker_6"
input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device)
# generate in batch
outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True)
# generate one-by-one
s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device)
s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device)
output1 = self.model.generate(**s1, **args)
output2 = self.model.generate(**s2, **args)
# up to and including the coarse acoustics model, results are the same
# the fine acoustics model introduces small differences
# first verify that the lengths match (they should, since length is decided by the coarse model)
self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1]))
# then assert almost equal
self.assertTrue(torch.allclose(outputs[0, : audio_lengths[0]], output1.squeeze(), atol=2e-3))
self.assertTrue(torch.allclose(outputs[1, : audio_lengths[1]], output2.squeeze(), atol=2e-3))
# now test single input with return_output_lengths = True
outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True)
self.assertTrue((outputs == output1).all().item())
@slow
def test_generate_end_to_end_with_sub_models_args(self):
input_ids = self.inputs
with torch.no_grad():
torch.manual_seed(0)
self.model.generate(
**input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7
)
output_ids_without_min_eos_p = self.model.generate(
**input_ids,
do_sample=True,
temperature=0.9,
coarse_do_sample=True,
coarse_temperature=0.7,
fine_temperature=0.3,
)
output_ids_with_min_eos_p = self.model.generate(
**input_ids,
do_sample=True,
temperature=0.9,
coarse_temperature=0.7,
fine_temperature=0.3,
min_eos_p=0.1,
)
self.assertLess(
len(output_ids_with_min_eos_p[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())
)
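# enable_cpu_offload keeps only the currently active sub-model on the GPU, so after offloading the
# allocated CUDA memory should drop by roughly the model's footprint while generation output stays identical.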
@require_torch_gpu
@slow
def test_generate_end_to_end_with_offload(self):
input_ids = self.inputs
with torch.no_grad():
# standard generation
output_with_no_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
torch.cuda.empty_cache()
memory_before_offload = torch.cuda.memory_allocated()
model_memory_footprint = self.model.get_memory_footprint()
# activate cpu offload
self.model.enable_cpu_offload()
memory_after_offload = torch.cuda.memory_allocated()
# checks that the model has been offloaded
# CUDA memory usage after offload should be near 0, leaving room for small differences
room_for_difference = 1.1
self.assertGreater(
(memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload
)
# checks if device is the correct one
self.assertEqual(self.model.device.type, torch_device)
# checks if hooks exist
self.assertTrue(hasattr(self.model.semantic, "_hf_hook"))
# output with cpu offload
output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
# checks if same output
self.assertListEqual(output_with_no_offload.tolist(), output_with_offload.tolist())
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/convnextv2/test_modeling_convnextv2.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvNextV2 model. """
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
num_channels=3,
num_stages=4,
hidden_sizes=[10, 20, 30, 40],
depths=[2, 2, 3, 2],
is_training=True,
use_labels=True,
intermediate_size=37,
hidden_act="gelu",
num_labels=10,
initializer_range=0.02,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_labels = num_labels
self.initializer_range = initializer_range
self.out_features = out_features
self.out_indices = out_indices
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ConvNextV2Config(
num_channels=self.num_channels,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
num_stages=self.num_stages,
hidden_act=self.hidden_act,
is_decoder=False,
initializer_range=self.initializer_range,
out_features=self.out_features,
out_indices=self.out_indices,
num_labels=self.num_labels,
)
def create_and_check_model(self, config, pixel_values, labels):
model = ConvNextV2Model(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
model = ConvNextV2ForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
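# The backbone check below verifies feature maps and channel counts for the requested out_features,
# then repeats the check with out_features=None, in which case only the last stage is returned.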
def create_and_check_backbone(self, config, pixel_values, labels):
model = ConvNextV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = ConvNextV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def prepare_config_and_inputs_with_labels(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ConvNextV2 does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
ConvNextV2Model,
ConvNextV2ForImageClassification,
ConvNextV2Backbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self):
self.model_tester = ConvNextV2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.return_dict = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.use_cache = False
config.return_dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
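# ConvNextV2 exposes num_stages + 1 hidden states (the patch embeddings plus one per stage); the
# first one is downsampled by a factor of 4 relative to the input resolution.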
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvNextV2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
preprocessor = self.default_image_processor
image = prepare_img()
inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/convnextv2/test_modeling_tf_convnextv2.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow ConvNext model. """
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
import numpy as np
from transformers import ConvNextV2Config
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFConvNextV2ForImageClassification, TFConvNextV2Model
if is_vision_available():
from PIL import Image
from transformers import ConvNextImageProcessor
class TFConvNextV2ModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
num_channels=3,
num_stages=4,
hidden_sizes=[10, 20, 30, 40],
depths=[2, 2, 3, 2],
is_training=True,
use_labels=True,
intermediate_size=37,
hidden_act="gelu",
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
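    # The helpers below build a small ConvNextV2Config and random pixel_values (plus optional labels) used by all tests.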
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ConvNextV2Config(
num_channels=self.num_channels,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
num_stages=self.num_stages,
hidden_act=self.hidden_act,
is_decoder=False,
initializer_range=self.initializer_range,
)
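    # The last hidden state is expected to be 32x smaller spatially than the input (4x patchify stem followed by three 2x downsampling layers).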
def create_and_check_model(self, config, pixel_values, labels):
model = TFConvNextV2Model(config=config)
result = model(pixel_values, training=False)
# expected last hidden states: batch_size, channels, height // 32, width // 32
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = TFConvNextV2ForImageClassification(config)
result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFConvNextV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ConvNextV2 does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
"""
all_model_classes = (TFConvNextV2Model, TFConvNextV2ForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TFConvNextV2Model, "image-classification": TFConvNextV2ForImageClassification}
if is_tf_available()
else {}
)
test_pruning = False
test_onnx = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self):
self.model_tester = TFConvNextV2ModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=ConvNextV2Config,
has_text_modality=False,
hidden_size=37,
)
@unittest.skip(reason="ConvNext does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.",
)
@slow
def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason="ConvNext does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.",
)
def test_dataset_conversion(self):
super().test_dataset_conversion()
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
    # Since ConvNextV2 does not output attentions, we need to rewrite this test.
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
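        # Compare the tuple outputs (return_dict=False) against the dict outputs (return_dict=True) element-wise;
        # since the model has no attentions, only hidden-state tensors appear in the outputs.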
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model = TFConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224")
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class TFConvNextV2ModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
ConvNextImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self):
model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="tf")
# forward pass
outputs = model(**inputs)
# verify the logits
expected_shape = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = np.array([0.9996, 0.1966, -0.4386])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/graphormer/test_modeling_graphormer.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Graphormer model. """
import copy
import inspect
import os
import tempfile
import unittest
from transformers import GraphormerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import tensor
from transformers import GraphormerForGraphClassification, GraphormerModel
from transformers.models.graphormer.modeling_graphormer import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class GraphormerModelTester:
def __init__(
self,
parent,
num_classes=1,
num_atoms=32 * 9,
num_edges=32 * 3,
num_in_degree=32,
num_out_degree=32,
num_spatial=32,
num_edge_dis=16,
multi_hop_max_dist=5, # sometimes is 20
spatial_pos_max=32,
edge_type="multi_hop",
init_fn=None,
max_nodes=32,
share_input_output_embed=False,
num_hidden_layers=2,
embedding_dim=32,
ffn_embedding_dim=32,
num_attention_heads=4,
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.1,
layerdrop=0.0,
encoder_normalize_before=False,
pre_layernorm=False,
apply_graphormer_init=False,
activation_fn="gelu",
embed_scale=None,
freeze_embeddings=False,
num_trans_layers_to_freeze=0,
traceable=False,
q_noise=0.0,
qn_block_size=8,
kdim=None,
vdim=None,
bias=True,
self_attention=True,
batch_size=10,
graph_size=20,
is_training=True,
):
self.parent = parent
self.num_classes = num_classes
self.num_labels = num_classes
self.num_atoms = num_atoms
self.num_in_degree = num_in_degree
self.num_out_degree = num_out_degree
self.num_edges = num_edges
self.num_spatial = num_spatial
self.num_edge_dis = num_edge_dis
self.edge_type = edge_type
self.multi_hop_max_dist = multi_hop_max_dist
self.spatial_pos_max = spatial_pos_max
self.max_nodes = max_nodes
self.num_hidden_layers = num_hidden_layers
self.embedding_dim = embedding_dim
self.hidden_size = embedding_dim
self.ffn_embedding_dim = ffn_embedding_dim
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.layerdrop = layerdrop
self.encoder_normalize_before = encoder_normalize_before
self.pre_layernorm = pre_layernorm
self.apply_graphormer_init = apply_graphormer_init
self.activation_fn = activation_fn
self.embed_scale = embed_scale
self.freeze_embeddings = freeze_embeddings
self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
self.share_input_output_embed = share_input_output_embed
self.traceable = traceable
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.init_fn = init_fn
self.kdim = kdim
self.vdim = vdim
self.self_attention = self_attention
self.bias = bias
self.batch_size = batch_size
self.graph_size = graph_size
self.is_training = is_training
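    # The helpers below build random graph inputs (node/edge features, attention bias, spatial positions and degrees) plus labels for a batch of small graphs.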
def prepare_config_and_inputs(self):
attn_bias = ids_tensor(
[self.batch_size, self.graph_size + 1, self.graph_size + 1], self.num_atoms
) # Def not sure here
attn_edge_type = ids_tensor([self.batch_size, self.graph_size, self.graph_size, 1], self.num_edges)
spatial_pos = ids_tensor([self.batch_size, self.graph_size, self.graph_size], self.num_spatial)
in_degree = ids_tensor([self.batch_size, self.graph_size], self.num_in_degree)
out_degree = ids_tensor([self.batch_size, self.graph_size], self.num_out_degree)
input_nodes = ids_tensor([self.batch_size, self.graph_size, 1], self.num_atoms)
input_edges = ids_tensor(
[self.batch_size, self.graph_size, self.graph_size, self.multi_hop_max_dist, 1], self.num_edges
)
labels = ids_tensor([self.batch_size], self.num_classes)
config = self.get_config()
return config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels
def get_config(self):
return GraphormerConfig(
num_atoms=self.num_atoms,
num_in_degree=self.num_in_degree,
num_out_degree=self.num_out_degree,
num_edges=self.num_edges,
num_spatial=self.num_spatial,
num_edge_dis=self.num_edge_dis,
edge_type=self.edge_type,
multi_hop_max_dist=self.multi_hop_max_dist,
spatial_pos_max=self.spatial_pos_max,
max_nodes=self.max_nodes,
num_hidden_layers=self.num_hidden_layers,
embedding_dim=self.embedding_dim,
hidden_size=self.embedding_dim,
ffn_embedding_dim=self.ffn_embedding_dim,
num_attention_heads=self.num_attention_heads,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
activation_dropout=self.activation_dropout,
layerdrop=self.layerdrop,
encoder_normalize_before=self.encoder_normalize_before,
pre_layernorm=self.pre_layernorm,
apply_graphormer_init=self.apply_graphormer_init,
activation_fn=self.activation_fn,
embed_scale=self.embed_scale,
freeze_embeddings=self.freeze_embeddings,
num_trans_layers_to_freeze=self.num_trans_layers_to_freeze,
share_input_output_embed=self.share_input_output_embed,
traceable=self.traceable,
q_noise=self.q_noise,
qn_block_size=self.qn_block_size,
init_fn=self.init_fn,
kdim=self.kdim,
vdim=self.vdim,
self_attention=self.self_attention,
bias=self.bias,
)
def create_and_check_model(
self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels
):
model = GraphormerModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_nodes=input_nodes,
attn_bias=attn_bias,
in_degree=in_degree,
out_degree=out_degree,
spatial_pos=spatial_pos,
input_edges=input_edges,
attn_edge_type=attn_edge_type,
labels=labels,
)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.graph_size + 1, self.hidden_size)
)
def create_and_check_for_graph_classification(
self, config, attn_bias, attn_edge_type, spatial_pos, in_degree, out_degree, input_nodes, input_edges, labels
):
model = GraphormerForGraphClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_nodes=input_nodes,
attn_bias=attn_bias,
in_degree=in_degree,
out_degree=out_degree,
spatial_pos=spatial_pos,
input_edges=input_edges,
attn_edge_type=attn_edge_type,
labels=labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
attn_bias,
attn_edge_type,
spatial_pos,
in_degree,
out_degree,
input_nodes,
input_edges,
labels,
) = config_and_inputs
inputs_dict = {
"attn_bias": attn_bias,
"attn_edge_type": attn_edge_type,
"spatial_pos": spatial_pos,
"in_degree": in_degree,
"out_degree": out_degree,
"input_nodes": input_nodes,
"input_edges": input_edges,
"labels": labels,
}
return config, inputs_dict
@require_torch
class GraphormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (GraphormerForGraphClassification, GraphormerModel) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {"feature-extraction": GraphormerModel} if is_torch_available() else {}
test_pruning = False
test_head_masking = False
test_resize_embeddings = False
main_input_name_nodes = "input_nodes"
main_input_name_edges = "input_edges"
has_attentions = False # does not output attention
def setUp(self):
self.model_tester = GraphormerModelTester(self)
self.config_tester = ConfigTester(self, config_class=GraphormerConfig, has_text_modality=False)
# overwrite from common as `Graphormer` requires more input arguments
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
try:
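                # Flatten the prepared dict into a fixed tuple of tensors so the model can be called and traced with positional example inputs.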
required_keys = (
"input_nodes",
"input_edges",
"attn_bias",
"in_degree",
"out_degree",
"spatial_pos",
"attn_edge_type",
)
required_inputs = tuple(inputs[k] for k in required_keys)
model(*required_inputs)
traced_model = torch.jit.trace(model, required_inputs)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
non_persistent_buffers = {}
for key in loaded_model_state_dict.keys():
if key not in model_state_dict.keys():
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
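            # Buffers present in the traced model's state dict but not in the original's are non-persistent buffers;
            # verify each one still matches a buffer of the original model by value.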
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
if layer_name in loaded_model_state_dict:
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
            # Avoid a memory leak. Without this, each call increases RAM usage by ~20MB.
            # (Even with this call, there is still a small leak of ~0.04MB per call.)
self.clear_torch_jit_class_registry()
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Graphormer does not use one single inputs_embedding but three")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Graphormer does not implement feed forward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="Graphormer does not share input and output embeddings")
def test_model_common_attributes(self):
pass
def test_initialization(self):
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
batch_size = self.model_tester.batch_size
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[batch_size, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Always returns hidden_states
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = False
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
outputs = model(**inputs_dict)
output = outputs[0]
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
# Inputs are 'input_nodes' and 'input_edges' not 'input_ids'
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "forward"))
# The main input is the name of the argument after `self`
observed_main_input_name_nodes = list(model_signature.parameters.keys())[1]
observed_main_input_name_edges = list(model_signature.parameters.keys())[2]
self.assertEqual(model_class.main_input_name_nodes, observed_main_input_name_nodes)
self.assertEqual(model_class.main_input_name_edges, observed_main_input_name_edges)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_nodes", "input_edges"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_graph_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_graph_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = GraphormerForGraphClassification.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class GraphormerModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_graph_classification(self):
model = GraphormerForGraphClassification.from_pretrained("clefourrier/graphormer-base-pcqm4mv2")
# Actual real graph data from the MUTAG dataset
# fmt: off
model_input = {
"attn_bias": tensor(
[
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, float("-inf"), float("-inf"), float("-inf"), float("-inf")],
],
]
),
"attn_edge_type": tensor(
[
[
[[0], [3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [3], [0], [3], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [3], [0], [3], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [3], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [3], [0], [0], [0]],
[[0], [0], [0], [3], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [3], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [3], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [3], [3]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0]],
],
[
[[0], [3], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0]],
[[3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [3], [0], [3], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [3], [0], [0], [0], [3], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [3], [0], [3], [3], [0], [0], [0], [0], [0], [0]],
[[3], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [3], [3], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [3], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
],
]
),
# fmt: on
"spatial_pos": tensor(
[
[
[1, 2, 3, 4, 3, 2, 4, 5, 6, 5, 6, 7, 8, 7, 9, 10, 10],
[2, 1, 2, 3, 4, 3, 5, 6, 5, 4, 5, 6, 7, 6, 8, 9, 9],
[3, 2, 1, 2, 3, 4, 4, 5, 4, 3, 4, 5, 6, 5, 7, 8, 8],
[4, 3, 2, 1, 2, 3, 3, 4, 3, 2, 3, 4, 5, 4, 6, 7, 7],
[3, 4, 3, 2, 1, 2, 2, 3, 4, 3, 4, 5, 6, 5, 7, 8, 8],
[2, 3, 4, 3, 2, 1, 3, 4, 5, 4, 5, 6, 7, 6, 8, 9, 9],
[4, 5, 4, 3, 2, 3, 1, 2, 3, 4, 5, 6, 5, 4, 6, 7, 7],
[5, 6, 5, 4, 3, 4, 2, 1, 2, 3, 4, 5, 4, 3, 5, 6, 6],
[6, 5, 4, 3, 4, 5, 3, 2, 1, 2, 3, 4, 3, 2, 4, 5, 5],
[5, 4, 3, 2, 3, 4, 4, 3, 2, 1, 2, 3, 4, 3, 5, 6, 6],
[6, 5, 4, 3, 4, 5, 5, 4, 3, 2, 1, 2, 3, 4, 4, 5, 5],
[7, 6, 5, 4, 5, 6, 6, 5, 4, 3, 2, 1, 2, 3, 3, 4, 4],
[8, 7, 6, 5, 6, 7, 5, 4, 3, 4, 3, 2, 1, 2, 2, 3, 3],
[7, 6, 5, 4, 5, 6, 4, 3, 2, 3, 4, 3, 2, 1, 3, 4, 4],
[9, 8, 7, 6, 7, 8, 6, 5, 4, 5, 4, 3, 2, 3, 1, 2, 2],
[10, 9, 8, 7, 8, 9, 7, 6, 5, 6, 5, 4, 3, 4, 2, 1, 3],
[10, 9, 8, 7, 8, 9, 7, 6, 5, 6, 5, 4, 3, 4, 2, 3, 1],
],
[
[1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 4, 5, 5, 0, 0, 0, 0],
[2, 1, 2, 3, 4, 5, 4, 3, 4, 3, 5, 6, 6, 0, 0, 0, 0],
[3, 2, 1, 2, 3, 4, 3, 2, 3, 4, 4, 5, 5, 0, 0, 0, 0],
[4, 3, 2, 1, 2, 3, 4, 3, 4, 5, 5, 6, 6, 0, 0, 0, 0],
[5, 4, 3, 2, 1, 2, 3, 4, 5, 6, 6, 7, 7, 0, 0, 0, 0],
[6, 5, 4, 3, 2, 1, 2, 3, 4, 5, 5, 6, 6, 0, 0, 0, 0],
[5, 4, 3, 4, 3, 2, 1, 2, 3, 4, 4, 5, 5, 0, 0, 0, 0],
[4, 3, 2, 3, 4, 3, 2, 1, 2, 3, 3, 4, 4, 0, 0, 0, 0],
[3, 4, 3, 4, 5, 4, 3, 2, 1, 2, 2, 3, 3, 0, 0, 0, 0],
[2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 3, 4, 4, 0, 0, 0, 0],
[4, 5, 4, 5, 6, 5, 4, 3, 2, 3, 1, 2, 2, 0, 0, 0, 0],
[5, 6, 5, 6, 7, 6, 5, 4, 3, 4, 2, 1, 3, 0, 0, 0, 0],
[5, 6, 5, 6, 7, 6, 5, 4, 3, 4, 2, 3, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
]
),
"in_degree": tensor(
[
[3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2],
[3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0],
]
),
"out_degree": tensor(
[
[3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 3, 3, 4, 3, 4, 2, 2],
[3, 3, 4, 3, 3, 3, 3, 4, 4, 3, 4, 2, 2, 0, 0, 0, 0],
]
),
"input_nodes": tensor(
[
[[3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3]],
[[3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [3], [0], [0], [0], [0]],
]
),
"input_edges": tensor(
[
[
[
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
],
[
[
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [4]],
[[4], [4], [4], [4], [0]],
[[4], [4], [4], [0], [0]],
[[4], [4], [0], [0], [0]],
[[4], [4], [4], [0], [0]],
[[4], [0], [0], [0], [0]],
[[4], [4], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
[
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
[[0], [0], [0], [0], [0]],
],
],
]
),
"labels": tensor([1, 0]),
}
output = model(**model_input)["logits"]
expected_shape = torch.Size((2, 1))
self.assertEqual(output.shape, expected_shape)
expected_logs = torch.tensor(
[[7.6060], [7.4126]]
)
self.assertTrue(torch.allclose(output, expected_logs, atol=1e-4))
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/glpn/test_modeling_glpn.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch GLPN model. """
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MODEL_MAPPING, GLPNConfig, GLPNForDepthEstimation, GLPNModel
from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class GLPNModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[16, 32, 64, 128],
downsampling_rates=[1, 4, 8, 16],
num_attention_heads=[1, 2, 4, 8],
is_training=True,
use_labels=True,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
decoder_hidden_size=16,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.decoder_hidden_size = decoder_hidden_size
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return GLPNConfig(
image_size=self.image_size,
num_channels=self.num_channels,
num_encoder_blocks=self.num_encoder_blocks,
depths=self.depths,
hidden_sizes=self.hidden_sizes,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
decoder_hidden_size=self.decoder_hidden_size,
)
def create_and_check_model(self, config, pixel_values, labels):
model = GLPNModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
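        # The final encoder stage output is image_size // (downsampling_rates[-1] * 2), i.e. 32x smaller than the input here.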
expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
)
def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = GLPNForDepthEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else ()
pipeline_model_mapping = (
{"depth-estimation": GLPNForDepthEstimation, "feature-extraction": GLPNModel} if is_torch_available() else {}
)
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = GLPNModelTester(self)
self.config_tester = GLPNConfigTester(self, config_class=GLPNConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_depth_estimation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
@unittest.skip("GLPN does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip("GLPN does not have get_input_embeddings method and get_output_embeddings methods")
def test_model_common_attributes(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = sum(self.model_tester.depths)
self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also works using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
# verify the first attentions (first block, first layer)
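            # GLPN uses SegFormer-style efficient attention: keys/values are spatially reduced by sr_ratio,
            # so the last attention dim is the query sequence length divided by sr_ratio**2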
expected_seq_len = (self.model_tester.image_size // 4) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
)
# verify the last attentions (last block, last layer)
expected_seq_len = (self.model_tester.image_size // 32) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]),
[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
# verify the first attentions (first block, first layer)
expected_seq_len = (self.model_tester.image_size // 4) ** 2
expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_encoder_blocks
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_training(self):
if not self.model_tester.is_training:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING):
continue
# TODO: remove the following 3 lines once we have a MODEL_FOR_DEPTH_ESTIMATION_MAPPING
# this can then be incorporated into _prepare_for_class in test_modeling_common.py
if model_class.__name__ == "GLPNForDepthEstimation":
batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
inputs_dict["labels"] = torch.zeros(
[self.model_tester.batch_size, height, width], device=torch_device
).long()
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@slow
def test_model_from_pretrained(self):
for model_name in GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = GLPNModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
class GLPNModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_depth_estimation(self):
image_processor = GLPNImageProcessor.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0])
model = GLPNForDepthEstimation.from_pretrained(GLPN_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
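        # the 640x480 COCO test image is already a multiple of 32 in both dims, so the processor keeps its resolution
        # (this matches the expected_shape asserted below)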
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the predicted depth
expected_shape = torch.Size([1, 480, 640])
self.assertEqual(outputs.predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/glpn/test_image_processing_glpn.py | # coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size_divisor=32,
do_rescale=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size_divisor = size_divisor
self.do_rescale = do_rescale
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
def expected_output_image_shape(self, images):
if isinstance(images[0], Image.Image):
width, height = images[0].size
else:
height, width = images[0].shape[1], images[0].shape[2]
height = height // self.size_divisor * self.size_divisor
width = width // self.size_divisor * self.size_divisor
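        # both dims are rounded down to the nearest multiple of size_divisor, e.g. 500 -> 480 when size_divisor is 32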
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
size_divisor=self.size_divisor,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = GLPNImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = GLPNImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
self.assertTrue(hasattr(image_processing, "resample"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
def test_call_pil(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
def test_call_numpy_4_channels(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processing_class.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
        # Test non-batched input (GLPNImageProcessor doesn't support batching)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape))
self.image_processing_class.num_channels = 3
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBirdPegasus model. """
import copy
import tempfile
import unittest
from transformers import BigBirdPegasusConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
PegasusTokenizer,
)
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusDecoder,
BigBirdPegasusEncoder,
)
MODEL_ID = "google/bigbird-pegasus-large-pubmed"
def prepare_bigbird_pegasus_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
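    # default attention masks simply mark every non-pad token as attendable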
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
input_dict = {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
input_dict = {k: input_dict[k].to(torch_device) for k in input_dict}
return input_dict
class BigBirdPegasusModelTester:
def __init__(
self,
parent,
batch_size=7,
seq_length=256,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=31,
hidden_act="gelu_fast",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=260,
eos_token_id=1,
pad_token_id=0,
bos_token_id=2,
attention_type="block_sparse",
use_bias=False,
block_size=16,
num_random_blocks=3,
scale_embedding=True,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
self.scale_embedding = scale_embedding
def prepare_config_and_inputs(self):
        # keep the ids above the special tokens (pad=0, eos=1, bos=2)
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
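        # seq_length (256) is a multiple of block_size (16), so block_sparse attention needs no extra padding
        # for these inputs; auto-padding is exercised separately in test_auto_padding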
config = self.get_config()
inputs_dict = prepare_bigbird_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return BigBirdPegasusConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
attention_type=self.attention_type,
use_bias=self.use_bias,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
scale_embedding=self.scale_embedding,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
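        # with use_cache=True the decoder returns its key/value cache, so the second forward pass below
        # only needs to be fed the newly appended tokens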
        # create hypothetical next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens to input_ids and attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = BigBirdPegasusEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = BigBirdPegasusDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
def create_and_check_model(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
result = model(input_ids, decoder_input_ids=decoder_input_ids, use_cache=True)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
@require_torch
class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
BigBirdPegasusModel,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BigBirdPegasusForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"conversational": BigBirdPegasusForConditionalGeneration,
"feature-extraction": BigBirdPegasusModel,
"question-answering": BigBirdPegasusForQuestionAnswering,
"summarization": BigBirdPegasusForConditionalGeneration,
"text-classification": BigBirdPegasusForSequenceClassification,
"text-generation": BigBirdPegasusForCausalLM,
"text2text-generation": BigBirdPegasusForConditionalGeneration,
"translation": BigBirdPegasusForConditionalGeneration,
"zero-shot": BigBirdPegasusForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
test_pruning = False
test_head_masking = False
# torchscript tests are not passing for now.
# Also torchscript is not an important feature to have in the beginning.
test_torchscript = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
# overwrite from GenerationTesterMixin to solve problem
# with conflicting random seeds
def _get_input_ids_and_config(self, batch_size=2):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.attention_type = "original_full"
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
# cut to half length & take max batch_size 3
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:batch_size, :sequence_length]
attention_mask = attention_mask[:batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
def setUp(self):
self.model_tester = BigBirdPegasusModelTester(self)
self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_model_various_attn_type(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for attention_type in ["original_full", "block_sparse"]:
            config_and_inputs[0].attention_type = attention_type
            self.model_tester.create_and_check_model(*config_and_inputs)
def test_generate_without_input_ids(self):
if self.model_tester.attention_type == "block_sparse":
            # this test can never pass for BigBird-block-sparse attention since input_ids must be a multiple of block_size
return
super().test_generate_without_input_ids()
def test_retain_grad_hidden_states_attentions(self):
if self.model_tester.attention_type == "block_sparse":
            # this test can't pass since the returned attention matrix can't carry gradients (and is just 0 at many locations)
return
super().test_retain_grad_hidden_states_attentions()
# BigBirdPegasusForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (
BigBirdPegasusModel,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
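            # feed embedding vectors directly instead of token ids to exercise the inputs_embeds path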
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_dict.pop("decoder_attention_mask")
input_dict.pop("decoder_input_ids")
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(**input_dict)
model.generate(**input_dict, do_sample=True, early_stopping=False, num_return_sequences=3)
@slow
def test_batched_forward_original_full(self):
self._check_batched_forward(attn_type="original_full")
@slow
def test_batched_forward_block_sparse(self):
self._check_batched_forward(attn_type="block_sparse", tolerance=1e-1)
def _check_batched_forward(self, attn_type, tolerance=1e-3):
config, _ = self.model_tester.prepare_config_and_inputs()
config.max_position_embeddings = 128
config.block_size = 16
config.attention_type = attn_type
model = BigBirdPegasusForConditionalGeneration(config).to(torch_device)
model.eval()
chunk_length = 32
sample_with_padding = [3, 8, 11] * chunk_length + [0] * chunk_length
sample_without_padding = [4, 7, 9, 13] * chunk_length
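        # both samples are 128 tokens long (== max_position_embeddings); the first carries 32 trailing pad
        # positions that are masked out via the attention mask below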
target_ids_without_padding = [2, 3] * 8
target_ids_with_padding = [7, 8] * 6 + 4 * [-100]
attention_mask = torch.tensor(
[[1] * 3 * chunk_length + [0] * chunk_length, [1] * 4 * chunk_length],
device=torch_device,
dtype=torch.long,
)
input_ids = torch.tensor([sample_with_padding, sample_without_padding], device=torch_device, dtype=torch.long)
labels = torch.tensor(
[target_ids_without_padding, target_ids_with_padding], device=torch_device, dtype=torch.long
)
with torch.no_grad():
logits_batched = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).logits
with torch.no_grad():
logits_single_first = model(input_ids=input_ids[:1, :-chunk_length], labels=labels[:1]).logits
self.assertTrue(torch.allclose(logits_batched[0, -3:], logits_single_first[0, -3:], atol=tolerance))
with torch.no_grad():
logits_single_second = model(input_ids=input_ids[1:], labels=labels[1:, :-4]).logits
self.assertTrue(torch.allclose(logits_batched[1, :3], logits_single_second[0, :3], atol=tolerance))
def test_auto_padding(self):
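        # an input of 195 tokens is not a multiple of block_size=8, so the model should pad internally and
        # produce the same logits as the explicitly padded 200-token input further below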
ids = [[7, 6, 9] * 65]
config, _ = self.model_tester.prepare_config_and_inputs()
input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
attention_mask = input_ids.new_ones(input_ids.shape)
decoder_input_ids = torch.tensor([[33, 5, 8] * 3], device=torch_device, dtype=torch.long)
config.block_size = 8
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
output1 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
"logits"
]
ids = [[7, 6, 9] * 65 + [0] * 5]
input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
attention_mask = torch.tensor([[1] * 3 * 65 + [0] * 5], device=torch_device, dtype=torch.long)
output2 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
"logits"
]
self.assertTrue(torch.allclose(output1, output2, atol=1e-5))
def test_for_change_to_full_attn(self):
self.model_tester.seq_length = 9
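        # with only 9 tokens the sequence is too short for block sparse attention, so the model should
        # silently fall back to full attention and produce identical logits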
config, input_dict = self.model_tester.prepare_config_and_inputs()
# automatic switch will happen
config.attention_type = "block_sparse"
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
state_dict = model.state_dict()
outputs1 = model(**input_dict)["logits"]
config.attention_type = "original_full"
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
model.load_state_dict(state_dict)
outputs2 = model(**input_dict)["logits"]
self.assertTrue(torch.allclose(outputs1, outputs2, atol=1e-5))
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class BigBirdPegasusModelIntegrationTests(unittest.TestCase):
def _get_dummy_input_ids(self):
# fmt: off
ids = torch.tensor(
[[685, 560, 630, 193, 836, 764, 708, 360, 10, 724, 278, 755, 805, 600, 71, 473, 601, 397, 315, 706, 487, 552, 88, 175, 601, 850, 678, 538, 846, 73, 778, 917, 116, 977, 756, 710, 1023, 848, 432, 449, 851, 100, 985, 178, 756, 798, 660, 148, 911, 424, 289, 962, 266, 698, 640, 545, 544, 715, 245, 152, 676, 511, 460, 883, 184, 29, 803, 129, 129, 933, 54, 902, 551, 489, 757, 274, 336, 389, 618, 43, 443, 544, 889, 258, 322, 1000, 938, 58, 292, 871, 120, 780, 431, 83, 92, 897, 399, 612, 566, 909, 634, 939, 85, 204, 325, 775, 965, 48, 640, 1013, 132, 973, 869, 181, 1001, 847, 144, 661, 228, 955, 792, 720, 910, 374, 854, 561, 306, 582, 170, 676, 449, 96, 198, 607, 257, 882, 691, 293, 931, 817, 862, 388, 611, 555, 974, 369, 1000, 918, 202, 384, 513, 907, 371, 556, 955, 384, 24, 700, 131, 378, 99, 575, 932, 735, 124, 964, 595, 943, 740, 149, 210, 563, 412, 783, 42, 59, 706, 37, 779, 87, 44, 873, 12, 771, 308, 81, 33, 183, 129, 807, 276, 175, 555, 372, 185, 445, 489, 590, 287, 281, 638, 771, 516, 95, 227, 876, 270, 881, 297, 329, 20, 608, 841, 411, 451, 249, 181, 324, 1005, 830, 783, 865, 261, 964, 750, 140, 1021, 599, 462, 890, 622, 844, 697, 529, 153, 926, 150, 111, 26, 465, 957, 890, 887, 118, 446, 596, 674, 873, 929, 229, 508, 764, 122, 327, 470, 288, 526, 840, 697, 153, 592, 42, 275, 553, 439, 208, 780, 167, 112, 350, 1018, 130, 736, 887, 813, 217, 382, 25, 68, 979, 1008, 772, 235, 717, 999, 292, 727, 1023, 702, 710, 728, 556, 33, 12, 617, 213, 139, 695, 1004, 422, 638, 669, 624, 489, 771, 540, 980, 218, 664, 822, 308, 175, 149, 950, 542, 580, 548, 808, 394, 74, 298, 920, 900, 815, 731, 947, 877, 772, 800, 778, 395, 540, 430, 200, 424, 62, 342, 866, 45, 803, 931, 89, 34, 646, 233, 768, 37, 769, 460, 291, 198, 895, 950, 255, 81, 447, 137, 190, 130, 210, 369, 292, 377, 348, 169, 885, 805, 177, 538, 324, 872, 509, 804, 115, 799, 30, 754, 290, 147, 274, 222, 341, 510, 515, 70, 358, 909, 557, 886, 766, 323, 624, 92, 342, 424, 552, 972, 663, 415, 658, 711, 968, 275, 861, 44, 84, 434, 810, 94, 175, 406, 202, 858, 499, 481, 988, 330, 541, 1004, 210, 618, 955, 897, 983, 576, 17, 107, 165, 607, 537, 629, 192, 196, 308, 137, 953, 860, 94, 892, 751, 88, 161, 148, 585, 456, 88, 14, 315, 594, 121, 885, 952, 833, 716, 733, 933, 282, 801, 427, 783, 471, 285, 277, 979, 325, 535, 228, 891, 596, 648, 969, 574, 654, 518, 257, 137, 208, 464, 950, 140, 5, 424, 349, 942, 283, 587, 821, 1007, 434, 220, 820, 740, 874, 787, 374, 291, 564, 671, 438, 827, 940, 824, 509, 1021, 787, 942, 856, 450, 327, 491, 54, 817, 95, 60, 337, 667, 637, 164, 571, 946, 107, 202, 301, 782, 890, 839, 551, 680, 649, 14, 1017, 904, 721, 1017, 535, 505, 848, 986, 777, 740, 775, 210, 456, 469, 474, 963, 573, 401, 57, 883, 750, 664, 281, 5, 613, 1005, 306, 344, 543, 567, 154, 789, 354, 358, 698, 408, 412, 30, 930, 372, 822, 632, 948, 855, 503, 8, 618, 1010, 138, 695, 897, 852, 377, 933, 722, 149, 886, 1009, 260, 127, 811, 578, 533, 805, 325, 977, 113, 944, 651, 238, 361, 991, 860, 556, 64, 928, 917, 455, 266, 445, 604, 624, 420, 340, 845, 275, 370, 843, 227, 226, 940, 644, 909, 229, 827, 898, 370, 129, 808, 25, 699, 293, 356, 838, 135, 4, 227, 890, 681, 445, 418, 285, 837, 27, 737, 249, 366, 948, 202, 438, 198, 930, 648, 638, 607, 73, 247, 853, 136, 708, 214, 476, 621, 324, 103, 853, 328, 596, 224, 257, 646, 348, 108, 927, 970, 980, 520, 150, 998, 477, 393, 684, 559, 1, 361, 692, 551, 90, 75, 500, 739, 636, 344, 97, 852, 283, 719, 33, 116, 455, 866, 429, 828, 826, 691, 174, 746, 133, 442, 94, 348, 402, 420, 707, 405, 942, 186, 976, 376, 677, 874, 
703, 517, 498, 499, 206, 415, 366, 856, 739, 420, 586, 219, 952, 539, 375, 23, 461, 720, 355, 603, 52, 999, 815, 721, 574, 445, 816, 1019, 105, 641, 395, 972, 910, 328, 607, 519, 686, 246, 415, 528, 170, 167, 310, 940, 595, 392, 221, 834, 682, 835, 115, 861, 335, 742, 220, 247, 101, 416, 222, 179, 509, 175, 606, 627, 674, 781, 737, 746, 849, 67, 457, 1012, 126, 139, 625, 731, 156, 697, 121, 322, 449, 710, 857, 291, 976, 4, 701, 239, 678, 172, 724, 857, 583, 661, 903, 797, 628, 903, 835, 605, 989, 615, 870, 380, 710, 110, 330, 101, 695, 846, 918, 508, 672, 594, 36, 238, 244, 251, 393, 767, 282, 22, 430, 230, 983, 401, 154, 1007, 120, 678, 896, 386, 390, 711, 397, 347, 587, 1020, 951, 79, 831, 585, 200, 814, 134, 560, 700, 171, 452, 139, 755, 314, 476, 346, 388, 126, 719, 851, 198, 699, 901, 18, 710, 448, 351, 665, 644, 326, 425, 165, 571, 178, 440, 665, 674, 915, 866, 463, 754, 136, 950, 748, 47, 497, 1013, 640, 930, 338, 158, 525, 631, 815, 887, 289, 803, 116, 600, 637, 410, 175, 499, 876, 565, 1002, 623, 577, 333, 887, 586, 147, 773, 776, 644, 49, 77, 294, 117, 494, 561, 110, 979, 180, 562, 72, 859, 434, 1007, 286, 516, 75, 597, 491, 322, 888, 533, 209, 43, 499, 29, 411, 856, 181, 305, 963, 615, 778, 259, 373, 877, 746, 858, 381, 886, 613, 91, 69, 618, 523, 13, 617, 226, 422, 168, 929, 379, 290, 923, 100, 218, 307, 345, 211, 789, 735, 669, 585, 275, 410, 921, 552, 235, 636, 285, 665, 659, 708, 173, 724, 302, 823, 1, 139, 708, 903, 732, 868, 442, 967, 916, 163, 51, 243, 871]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def _get_dummy_target_ids(self):
# fmt: off
ids = torch.tensor(
[[13, 6, 1, 4, 12, 4, 8, 10, 4, 6, 3, 5, 8, 7, 9, 9]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def test_inference_block_sparse(self):
model = BigBirdPegasusForConditionalGeneration.from_pretrained(
MODEL_ID, attention_type="block_sparse", block_size=16, num_random_blocks=3
)
model.to(torch_device)
input_ids = self._get_dummy_input_ids()
target_ids = self._get_dummy_target_ids()
outputs = model(input_ids, labels=target_ids)
prediction_logits = outputs.logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103)))
# fmt: off
expected_prediction_logits_slice = torch.tensor(
[[1.5118, 5.5227, 4.8125, 1.7603, 8.1704, 3.996, 4.8118, 6.7806, 2.2297, 6.9834, 3.1906, 0.103, 7.1515, 6.3679, 3.1896, 6.3054, 3.9741, 6.3772, 5.0042, -0.6338, 6.7868, 0.592, 0.5363, 1.87, -0.331, -2.4518, 1.8263, 3.1899], [1.5702, 5.8135, 4.6675, 2.3674, 8.9828, 3.7913, 5.4027, 7.6567, 1.9007, 7.3706, 3.8824, 0.0247, 7.6094, 6.6985, 3.2826, 7.0094, 3.8713, 5.6555, 5.0439, -0.3519, 7.1525, 0.4062, -0.2419, 2.2194, -0.6447, -2.9614, 2.0713, 3.248], [1.4527, 5.6003, 4.5381, 2.6382, 9.2809, 3.2969, 5.6811, 8.4011, 1.6909, 7.4937, 4.3185, -0.0878, 7.61, 6.6822, 3.4753, 7.3962, 3.5336, 4.9216, 4.943, -0.2043, 7.3326, 0.2199, -0.6016, 2.4367, -0.7043, -3.0689, 2.3215, 3.0611], [1.1084, 5.6308, 4.4886, 2.717, 9.4103, 3.0733, 5.5825, 8.4325, 1.3075, 7.5495, 4.4782, -0.1092, 7.8115, 6.6285, 3.5311, 7.6853, 3.509, 4.4994, 4.9224, -0.1384, 7.3069, -0.0473, -0.8578, 2.4632, -0.5249, -3.4627, 2.2671, 2.8818]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertTrue(
torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
)
def test_inference_full_attn(self):
model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID, attention_type="original_full")
model.to(torch_device)
input_ids = self._get_dummy_input_ids()
target_ids = self._get_dummy_target_ids()
outputs = model(input_ids, labels=target_ids)
prediction_logits = outputs.logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103)))
# fmt: off
expected_prediction_logits_slice = torch.tensor(
[[1.3418, 5.8304, 6.5662, 2.0448, 8.7702, 4.6579, 4.9947, 6.429, 2.4296, 7.9431, 4.217, 0.0672, 7.334, 5.1966, 2.9603, 6.0814, 4.6756, 7.5522, 5.076, 0.213, 6.6638, 0.6577, 0.244, 2.1221, 0.7531, -2.4076, 1.8731, 3.5594], [1.5525, 6.0524, 6.309, 2.6245, 9.229, 4.5213, 5.0913, 7.0622, 1.7992, 8.0962, 4.7994, -0.0248, 7.7168, 5.5878, 3.0883, 6.5248, 4.7895, 6.9974, 4.8787, 0.5445, 6.6686, 0.0102, -0.1659, 2.6195, 0.7389, -2.8956, 1.9928, 3.3777], [1.6407, 6.2104, 6.0331, 2.8076, 9.4074, 3.9772, 5.0574, 7.5316, 1.4201, 8.3035, 5.0212, -0.1031, 7.553, 5.5023, 3.1427, 6.7674, 4.4409, 6.457, 4.525, 0.728, 6.5422, -0.6234, -0.4726, 2.7486, 0.6985, -3.0804, 1.9669, 3.2365], [1.5065, 6.1271, 5.8296, 2.8405, 9.5649, 3.6834, 5.1214, 7.546, 0.9758, 8.3335, 5.1952, -0.1395, 7.4348, 5.6893, 3.2942, 7.0356, 4.1665, 5.9695, 4.3898, 0.8931, 6.3988, -0.8957, -0.7522, 2.8924, 0.6498, -3.4358, 1.8654, 2.9735]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertTrue(
torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
)
def test_seq_to_seq_generation(self):
MODEL_ID = "google/bigbird-pegasus-large-arxiv"
model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID).to(torch_device)
tokenizer = PegasusTokenizer.from_pretrained(MODEL_ID)
ARTICLE_LEP = r"""the lep experiments at the resonance of @xmath1-boson have tested the standard model ( sm ) at quantum level , measuring the @xmath1-decay into fermion pairs with an accuracy of one part in ten thousands . the good agreement of the lep data with the sm predictions have severely constrained the behavior of new physics at the @xmath1-pole . taking these achievements into account one can imagine that the physics of @xmath1-boson will again play the central role in the frontier of particle physics if the next generation @xmath1 factory comes true with the generated @xmath1 events several orders of magnitude higher than that of the lep . this factory can be realized in the gigaz option of the international linear collider ( ilc)@xcite . the ilc is a proposed electron - positron collider with tunable energy ranging from @xmath12 to @xmath13 and polarized beams in its first phase , and the gigaz option corresponds to its operation on top of the resonance of @xmath1 boson by adding a bypass to its main beam line . given the high luminosity , @xmath14 , and the cross section at the resonance of @xmath1 boson , @xmath15 , about @xmath16 @xmath1 events can be generated in an operational year of @xmath17 of gigaz , which implies that the expected sensitivity to the branching ratio of @xmath1-decay can be improved from @xmath18 at the lep to @xmath19 at the gigaz@xcite . in light of this , the @xmath1-boson properties , especially its exotic or rare decays which are widely believed to be sensitive to new physics , should be investigated comprehensively to evaluate their potential in probing new physics . among the rare @xmath1-decays , the flavor changing ( fc ) processes were most extensively studied to explore the flavor texture in new physics @xcite , and it was found that , although these processes are severely suppressed in the sm , their branching ratios in new physics models can be greatly enhanced to @xmath19 for lepton flavor violation decays @xcite and @xmath20 for quark flavor violation decays @xcite . besides the fc processes , the @xmath1-decay into light higgs boson(s ) is another type of rare process that was widely studied , e.g. the decay @xmath21 ( @xmath22 ) with the particle @xmath0 denoting a light higgs boson was studied in @xcite , the decay @xmath23 was studied in the two higgs doublet model ( 2hdm)@xcite and the minimal supersymmetric standard model ( mssm)@xcite , and the decay @xmath4 was studied in a model independent way @xcite , in 2hdm@xcite and also in mssm@xcite . these studies indicate that , in contrast with the kinematic forbidden of these decays in the sm , the rates of these decays can be as large as @xmath18 in new physics models , which lie within the expected sensitivity of the gigaz . in this work , we extend the previous studies of these decays to some new models and investigate these decays altogether . we are motivated by some recent studies on the singlet extension of the mssm , such as the next - to - minimal supersymmetric standard model ( nmssm ) @xcite and the nearly minimal supersymmetric standard model ( nmssm ) @xcite , where a light cp - odd higgs boson @xmath0 with singlet - dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry like @xmath24 or peccei - quuin symmetry @xcite . these non - minimal supersymmetric models can not only avoid the @xmath25-problem , but also alleviate the little hierarchy by having such a light higgs boson @xmath0 @xcite . 
we are also motivated by that , with the latest experiments , the properties of the light higgs boson are more stringently constrained than before . so it is worth updating the previous studies . so far there is no model - independent lower bound on the lightest higgs boson mass . in the sm , it must be heavier than @xmath26 gev , obtained from the null observation of the higgs boson at lep experiments . however , due to the more complex structure of the higgs sector in the extensions of the sm , this lower bound can be significantly relaxed according to recent studies , e.g. , for the cp - odd higgs boson @xmath0 we have @xmath27 gev in the nmssm @xcite , @xmath28 gev in the nmssm @xcite , and @xmath29 gev in the lepton - specific 2hdm ( l2hdm ) @xcite . with such a light cp - odd higgs boson , the z - decay into one or more @xmath0 is open up . noting that the decay @xmath30 is forbidden due to bose symmetry , we in this work study the rare @xmath1-decays @xmath6 ( @xmath22 ) , @xmath31 and @xmath4 in a comparative way for four models , namely the type - ii 2hdm@xcite , the l2hdm @xcite , the nmssm and the nmssm . in our study , we examine carefully the constraints on the light @xmath0 from many latest experimental results . this work is organized as follows . in sec . ii we briefly describe the four new physics models . in sec . iii we present the calculations of the rare @xmath1-decays . in sec . iv we list the constraints on the four new physics models . in sec . v we show the numerical results for the branching ratios of the rare @xmath1-decays in various models . finally , the conclusion is given in sec . as the most economical way , the sm utilizes one higgs doublet to break the electroweak symmetry . as a result , the sm predicts only one physical higgs boson with its properties totally determined by two free parameters . in new physics models , the higgs sector is usually extended by adding higgs doublets and/or singlets , and consequently , more physical higgs bosons are predicted along with more free parameters involved in . the general 2hdm contains two @xmath32 doublet higgs fields @xmath33 and @xmath34 , and with the assumption of cp - conserving , its scalar potential can be parameterized as@xcite : @xmath35,\end{aligned}\ ] ] where @xmath36 ( @xmath37 ) are free dimensionless parameters , and @xmath38 ( @xmath39 ) are the parameters with mass dimension . after the electroweak symmetry breaking , the spectrum of this higgs sector includes three massless goldstone modes , which become the longitudinal modes of @xmath40 and @xmath1 bosons , and five massive physical states : two cp - even higgs bosons @xmath41 and @xmath42 , one neutral cp - odd higgs particle @xmath0 and a pair of charged higgs bosons @xmath43 . noting the constraint @xmath44 with @xmath45 and @xmath46 denoting the vacuum expectation values ( vev ) of @xmath33 and @xmath34 respectively , we choose @xmath47 as the input parameters with @xmath48 , and @xmath49 being the mixing angle that diagonalizes the mass matrix of the cp - even higgs fields . the difference between the type - ii 2hdm and the l2hdm comes from the yukawa coupling of the higgs bosons to quark / lepton . in the type - ii 2hdm , one higgs doublet @xmath34 generates the masses of up - type quarks and the other doublet @xmath33 generates the masses of down - type quarks and charged leptons ; while in the l2hdm one higgs doublet @xmath33 couples only to leptons and the other doublet @xmath34 couples only to quarks . 
so the yukawa interactions of @xmath0 to fermions in these two models are given by @xcite @xmath50 with @xmath51 denoting generation index . obviously , in the type - ii 2hdm the @xmath52 coupling and the @xmath53 coupling can be simultaneously enhanced by @xmath54 , while in the l2hdm only the @xmath53 coupling is enhanced by @xmath55 . the structures of the nmssm and the nmssm are described by their superpotentials and corresponding soft - breaking terms , which are given by @xcite @xmath56 where @xmath57 is the superpotential of the mssm without the @xmath25 term , @xmath58 and @xmath59 are higgs doublet and singlet superfields with @xmath60 and @xmath61 being their scalar component respectively , @xmath62 , @xmath63 , @xmath64 , @xmath65 , @xmath66 and @xmath67 are soft breaking parameters , and @xmath68 and @xmath69 are coefficients of the higgs self interactions . with the superpotentials and the soft - breaking terms , one can get the higgs potentials of the nmssm and the nmssm respectively . like the 2hdm , the higgs bosons with same cp property will mix and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices : @xmath70 where the fields on the right hands of the equations are component fields of @xmath71 , @xmath72 and @xmath61 defined by @xmath73 @xmath74 and @xmath75 are respectively the cp - even and cp - odd neutral higgs bosons , @xmath76 and @xmath77 are goldstone bosons eaten by @xmath1 and @xmath78 , and @xmath79 is the charged higgs boson . so both the nmssm and nmssm predict three cp - even higgs bosons , two cp - odd higgs bosons and one pair of charged higgs bosons . in general , the lighter cp - odd higgs @xmath0 in these model is the mixture of the singlet field @xmath80 and the doublet field combination , @xmath81 , i.e. @xmath82 and its couplings to down - type quarks are then proportional to @xmath83 . so for singlet dominated @xmath0 , @xmath84 is small and the couplings are suppressed . as a comparison , the interactions of @xmath0 with the squarks are given by@xcite @xmath85 i.e. the interaction does not vanish when @xmath86 approaches zero . just like the 2hdm where we use the vevs of the higgs fields as fundamental parameters , we choose @xmath68 , @xmath69 , @xmath87 , @xmath88 , @xmath66 and @xmath89 as input parameters for the nmssm@xcite and @xmath68 , @xmath54 , @xmath88 , @xmath65 , @xmath90 and @xmath91 as input parameters for the nmssm@xcite . about the nmssm and the nmssm , three points should be noted . the first is for the two models , there is no explicit @xmath92term , and the effective @xmath25 parameter ( @xmath93 ) is generated when the scalar component of @xmath59 develops a vev . the second is , the nmssm is actually same as the nmssm with @xmath94@xcite , because the tadpole terms @xmath95 and its soft breaking term @xmath96 in the nmssm do not induce any interactions , except for the tree - level higgs boson masses and the minimization conditions . and the last is despite of the similarities , the nmssm has its own peculiarity , which comes from its neutralino sector . in the basis @xmath97 , its neutralino mass matrix is given by @xcite @xmath98 where @xmath99 and @xmath100 are @xmath101 and @xmath102 gaugino masses respectively , @xmath103 , @xmath104 , @xmath105 and @xmath106 . 
after diagonalizing this matrix one can get the mass eigenstate of the lightest neutralino @xmath107 with mass taking the following form @xcite @xmath108 this expression implies that @xmath107 must be lighter than about @xmath109 gev for @xmath110 ( from lower bound on chargnio mass ) and @xmath111 ( perturbativity bound ) . like the other supersymmetric models , @xmath107 as the lightest sparticle acts as the dark matter in the universe , but due to its singlino - dominated nature , it is difficult to annihilate sufficiently to get the correct density in the current universe . so the relic density of @xmath107 plays a crucial way in selecting the model parameters . for example , as shown in @xcite , for @xmath112 , there is no way to get the correct relic density , and for the other cases , @xmath107 mainly annihilates by exchanging @xmath1 boson for @xmath113 , or by exchanging a light cp - odd higgs boson @xmath0 with mass satisfying the relation @xmath114 for @xmath115 . for the annihilation , @xmath54 and @xmath25 are required to be less than 10 and @xmath116 respectively because through eq.([mass - exp ] ) a large @xmath87 or @xmath25 will suppress @xmath117 to make the annihilation more difficult . the properties of the lightest cp - odd higgs boson @xmath0 , such as its mass and couplings , are also limited tightly since @xmath0 plays an important role in @xmath107 annihilation . the phenomenology of the nmssm is also rather special , and this was discussed in detail in @xcite . in the type - ii 2hdm , l2hdm , nmssm and nmssm , the rare @xmath1-decays @xmath118 ( @xmath22 ) , @xmath3 and @xmath4 may proceed by the feynman diagrams shown in fig.[fig1 ] , fig.[fig2 ] and fig.[fig3 ] respectively . for these diagrams , the intermediate state @xmath119 represents all possible cp - even higgs bosons in the corresponding model , i.e. @xmath41 and @xmath42 in type - ii 2hdm and l2hdm and @xmath41 , @xmath42 and @xmath120 in nmssm and nmssm . in order to take into account the possible resonance effects of @xmath119 in fig.[fig1](c ) for @xmath2 and fig.[fig3 ] ( a ) for @xmath11 , we have calculated all the decay modes of @xmath119 and properly included the width effect in its propagator . as to the decay @xmath121 , two points should be noted . one is , unlike the decays @xmath6 and @xmath11 , this process proceeds only through loops mediated by quarks / leptons in the type - ii 2hdm and l2hdm , and additionally by sparticles in the nmssm and nmssm . so in most cases its rate should be much smaller than the other two . the other is due to cp - invariance , loops mediated by squarks / sleptons give no contribution to the decay@xcite . in actual calculation , this is reflected by the fact that the coupling coefficient of @xmath122 differs from that of @xmath123 by a minus sign ( see eq.([asqsq ] ) ) , and as a result , the squark - mediated contributions to @xmath121 are completely canceled out . with regard to the rare decay @xmath11 , we have more explanations . in the lowest order , this decay proceeds by the diagram shown in fig.[fig3 ] ( a ) , and hence one may think that , as a rough estimate , it is enough to only consider the contributions from fig.[fig3](a ) . however , we note that in some cases of the type - ii 2hdm and l2hdm , due to the cancelation of the contributions from different @xmath119 in fig.[fig3 ] ( a ) and also due to the potentially largeness of @xmath124 couplings ( i.e. 
larger than the electroweak scale @xmath125 ) , the radiative correction from the higgs - mediated loops may dominate over the tree level contribution even when the tree level prediction of the rate , @xmath126 , exceeds @xmath20 . on the other hand , we find the contribution from quark / lepton - mediated loops can be safely neglected if @xmath127 in the type - ii 2hdm and the l2hdm . in the nmssm and the nmssm , besides the corrections from the higgs- and quark / lepton - mediated loops , loops involving sparticles such as squarks , charginos and neutralinos can also contribute to the decay . we numerically checked that the contributions from squarks and charginos can be safely neglected if @xmath127 . we also calculated part of potentially large neutralino correction ( note that there are totally about @xmath128 diagrams for such correction ! ) and found they can be neglected too . since considering all the radiative corrections will make our numerical calculation rather slow , we only include the most important correction , namely that from higgs - mediated loops , in presenting our results for the four models . one can intuitively understand the relative smallness of the sparticle contribution to @xmath11 as follows . first consider the squark contribution which is induced by the @xmath129 interaction ( @xmath130 denotes the squark in chirality state ) and the @xmath131 interaction through box diagrams . because the @xmath132 interaction conserves the chirality of the squarks while the @xmath133 interaction violates the chirality , to get non - zero contribution to @xmath11 from the squark loops , at least four chiral flippings are needed , with three of them provided by @xmath131 interaction and the rest provided by the left - right squark mixing . this means that , if one calculates the amplitude in the chirality basis with the mass insertion method , the amplitude is suppressed by the mixing factor @xmath134 with @xmath135 being the off diagonal element in squark mass matrix . next consider the chargino / neutralino contributions . since for a light @xmath0 , its doublet component , parameterized by @xmath84 in eq.([mixing ] ) , is usually small , the couplings of @xmath0 with the sparticles will never be tremendously large@xcite . so the chargino / neutralino contributions are not important too . in our calculation of the decays , we work in the mass eigenstates of sparticles instead of in the chirality basis . for the type - ii 2hdm and the l2hdm , we consider the following constraints @xcite : * theoretical constraints on @xmath136 from perturbativity , unitarity and requirements that the scalar potential is finit at large field values and contains no flat directions @xcite , which imply that @xmath137 * the constraints from the lep search for neutral higgs bosons . we compute the signals from the higgs - strahlung production @xmath138 ( @xmath139 ) with @xmath140 @xcite and from the associated production @xmath141 with @xmath142 @xcite , and compare them with the corresponding lep data which have been inputted into our code . we also consider the constraints from @xmath138 by looking for a peak of @xmath143 recoil mass distribution of @xmath1-boson @xcite and the constraint of @xmath144 mev when @xmath145 @xcite . + these constraints limit the quantities such as @xmath146 \times br ( h_i \to \bar{b } b ) $ ] on the @xmath147 plane with the the subscript @xmath148 denoting the coupling coefficient of the @xmath149 interaction . 
they also impose a model - dependent lower bound on @xmath150 , e.g. , @xmath151 for the type - ii 2hdm ( from our scan results ) , @xmath152 for the l2hdm@xcite , and @xmath153 for the nmssm @xcite . these bounds are significantly lower than that of the sm , i.e. @xmath154 , partially because in new physics models , unconventional decay modes of @xmath155 such as @xmath156 are open up . as to the nmssm , another specific reason for allowing a significantly lighter cp - even higgs boson is that the boson may be singlet - dominated in this model . + with regard to the lightest cp - odd higgs boson @xmath0 , we checked that there is no lower bound on its mass so long as the @xmath157 interaction is weak or @xmath155 is sufficiently heavy . * the constraints from the lep search for a light higgs boson via the yukawa process @xmath158 with @xmath22 and @xmath61 denoting a scalar @xcite . these constraints can limit the @xmath159 coupling versus @xmath160 in new physics models . * the constraints from the cleo - iii limit on @xmath161 and the latest babar limits on @xmath162 . these constraints will put very tight constraints on the @xmath163 coupling for @xmath164 . in our analysis , we use the results of fig.8 in the second paper of @xcite to excluded the unfavored points . * the constraints from @xmath165 couplings . since the higgs sector can give sizable higher order corrections to @xmath165 couplings , we calculate them to one loop level and require the corrected @xmath165 couplings to lie within the @xmath166 range of their fitted value . the sm predictions for the couplings at @xmath1-pole are given by @xmath167 and @xmath168 @xcite , and the fitted values are given by @xmath169 and @xmath170 , respectively@xcite . we adopt the formula in @xcite to the 2hdm in our calculation . * the constraints from @xmath171 leptonic decay . we require the new physics correction to the branching ratio @xmath172 to be in the range of @xmath173 @xcite . we use the formula in @xcite in our calculation . + about the constraints ( 5 ) and ( 6 ) , two points should be noted . one is all higgs bosons are involved in the constraints by entering the self energy of @xmath171 lepton , the @xmath174 vertex correction or the @xmath175 vertex correction , and also the box diagrams for @xmath176@xcite . since the yukawa couplings of the higgs bosons to @xmath171 lepton get enhanced by @xmath54 and so do the corrections , @xmath54 must be upper bounded for given spectrum of the higgs sector . generally speaking , the lighter @xmath0 is , the more tightly @xmath54 is limited@xcite . the other point is in the type - ii 2hdm , @xmath177 , b - physics observables as well as @xmath178 decays discussed above can constraint the model in a tighter way than the constraints ( 5 ) and ( 6 ) since the yukawa couplings of @xmath171 lepton and @xmath179 quark are simultaneously enhanced by @xmath54 . but for the l2hdm , because only the yukawa couplings of @xmath171 lepton get enhanced ( see eq.[yukawa ] ) , the constraints ( 5 ) and ( 6 ) are more important in limiting @xmath54 . * indirect constraints from the precision electroweak observables such as @xmath180 , @xmath181 and @xmath182 , or their combinations @xmath183 @xcite . we require @xmath184 to be compatible with the lep / sld data at @xmath185 confidence level@xcite . we also require new physics prediction of @xmath186 is within the @xmath187 range of its experimental value . 
the latest results for @xmath188 are @xmath189 ( measured value ) and @xmath190 ( sm prediction ) for @xmath191 gev @xcite . in our code , we adopt the formula for these observables presented in @xcite to the type - ii 2hdm and the l2hdm respectively . + in calculating @xmath180 , @xmath181 and @xmath182 , we note that these observables get dominant contributions from the self energies of the gauge bosons @xmath1 , @xmath192 and @xmath193 . since there is no @xmath194 coupling or @xmath195 coupling , @xmath0 must be associated with the other higgs bosons to contribute to the self energies . so by the uv convergence of these quantities , one can infer that , for the case of a light @xmath0 and @xmath196 , these quantities depend on the spectrum of the higgs sector in a way like @xmath197 at leading order , which implies that a light @xmath0 can still survive the constraints from the precision electroweak observables given the splitting between @xmath150 and @xmath198 is moderate@xcite . * the constraints from b physics observables such as the branching ratios for @xmath199 , @xmath200 and @xmath201 , and the mass differences @xmath202 and @xmath203 . we require their theoretical predications to agree with the corresponding experimental values at @xmath187 level . + in the type - ii 2hdm and the l2hdm , only the charged higgs boson contributes to these observables by loops , so one can expect that @xmath198 versus @xmath54 is to be limited . combined analysis of the limits in the type - ii 2hdm has been done by the ckmfitter group , and the lower bound of @xmath204 as a function of @xmath87 was given in fig.11 of @xcite . this analysis indicates that @xmath198 must be heavier than @xmath205 at @xmath185 c.l . regardless the value of @xmath54 . in this work , we use the results of fig.11 in @xcite to exclude the unfavored points . as for the l2hdm , b physics actually can not put any constraints@xcite because in this model the couplings of the charged higgs boson to quarks are proportional to @xmath206 and in the case of large @xmath54 which we are interested in , they are suppressed . in our analysis of the l2hdm , we impose the lep bound on @xmath198 , i.e. @xmath207@xcite . * the constraints from the muon anomalous magnetic moment @xmath208 . now both the theoretical prediction and the experimental measured value of @xmath208 have reached a remarkable precision , but a significant deviation still exists : @xmath209 @xcite . in the 2hdm , @xmath208 gets additional contributions from the one - loop diagrams induced by the higgs bosons and also from the two - loop barr - zee diagrams mediated by @xmath0 and @xmath155@xcite . if the higgs bosons are much heavier than @xmath25 lepton mass , the contributions from the barr - zee diagrams are more important , and to efficiently alleviate the discrepancy of @xmath208 , one needs a light @xmath0 along with its enhanced couplings to @xmath25 lepton and also to heavy fermions such as bottom quark and @xmath171 lepton to push up the effects of the barr - zee diagram@xcite . the cp - even higgs bosons are usually preferred to be heavy since their contributions to @xmath208 are negative . + in the type - ii 2hdm , because @xmath54 is tightly constrained by the process @xmath210 at the lep@xcite and the @xmath178 decay@xcite , the barr - zee diagram contribution is insufficient to enhance @xmath208 to @xmath187 range around its measured value@xcite . so in our analysis , we require the type - ii 2hdm to explain @xmath208 at @xmath211 level . 
while for the l2hdm , @xmath54 is less constrained compared with the type - ii 2hdm , and the barr - zee diagram involving the @xmath171-loop is capable to push up greatly the theoretical prediction of @xmath208@xcite . therefore , we require the l2hdm to explain the discrepancy at @xmath187 level . + unlike the other constraints discussed above , the @xmath208 constraint will put a two - sided bound on @xmath54 since on the one hand , it needs a large @xmath54 to enhance the barr - zee contribution , but on the other hand , too large @xmath54 will result in an unacceptable large @xmath208 . * since this paper concentrates on a light @xmath0 , the decay @xmath212 is open up with a possible large decay width . we require the width of any higgs boson to be smaller than its mass to avoid a too fat higgs boson@xcite . we checked that for the scenario characterized by @xmath213 , the coefficient of @xmath214 interaction is usually larger than the electroweak scale @xmath125 , and consequently a large decay width is resulted . for the nmssm and nmssm , the above constraints become more complicated because in these models , not only more higgs bosons are involved in , but also sparticles enter the constraints . so it is not easy to understand some of the constraints intuitively . take the process @xmath199 as an example . in the supersymmetric models , besides the charged higgs contribution , chargino loops , gluino loops as well as neutralino loops also contribute to the process@xcite , and depending on the susy parameters , any of these contributions may become dominated over or be canceled by other contributions . as a result , although the charged higgs affects the process in the same way as that in the type - ii 2hdm , charged higgs as light as @xmath215 is still allowed even for @xmath216@xcite . since among the constraints , @xmath208 is rather peculiar in that it needs new physics to explain the discrepancy between @xmath217 and @xmath218 , we discuss more about its dependence on susy parameters . in the nmssm and the nmssm , @xmath208 receives contributions from higgs loops and neutralino / chargino loops . for the higgs contribution , it is quite similar to that of the type - ii 2hdm except that more higgs bosons are involved in@xcite . for the neutralino / chargino contribution , in the light bino limit ( i.e. @xmath219 ) , it can be approximated by@xcite @xmath220 for @xmath221 with @xmath222 being smuon mass . so combining the two contributions together , one can learn that a light @xmath0 along with large @xmath54 and/or light smuon with moderate @xmath87 are favored to dilute the discrepancy . because more parameters are involved in the constraints on the supersymmetric models , we consider following additional constraints to further limit their parameters : * direct bounds on sparticle masses from the lep1 , the lep2 and the tevatron experiments @xcite . * the lep1 bound on invisible z decay @xmath223 ; the lep2 bound on neutralino production @xmath224 and @xmath225@xcite . * dark matter constraints from the wmap relic density 0.0975 @xmath226 0.1213 @xcite . note that among the above constraints , the constraint ( 2 ) on higgs sector and the constraint ( c ) on neutralino sector are very important . this is because in the supersymmetric models , the sm - like higgs is upper bounded by about @xmath227 at tree level and by about @xmath228 at loop level , and that the relic density restricts the lsp annihilation cross section in a certain narrow range . 
in our analysis of the nmssm , we calculate the constraints ( 3 ) and ( 5 - 7 ) by ourselves and utilize the code nmssmtools @xcite to implement the rest constraints . we also extend nmssmtools to the nmssm to implement the constraints . for the extension , the most difficult thing we faced is how to adapt the code micromegas@xcite to the nmssm case . we solve this problem by noting the following facts : * as we mentioned before , the nmssm is actually same as the nmssm with the trilinear singlet term setting to zero . so we can utilize the model file of the nmssm as the input of the micromegas and set @xmath229 . * since in the nmssm , the lsp is too light to annihilate into higgs pairs , there is no need to reconstruct the effective higgs potential to calculate precisely the annihilation channel @xmath230 with @xmath61 denoting any of higgs bosons@xcite . we thank the authors of the nmssmtools for helpful discussion on this issue when we finish such extension@xcite . with the above constraints , we perform four independent random scans over the parameter space of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively . we vary the parameters in following ranges : @xmath231 for the type - ii 2hdm , @xmath232 for the l2hdm , @xmath233 for the nmssm , and @xmath234 for the nmssm . in performing the scans , we note that for the nmssm and the nmssm , some constraints also rely on the gaugino masses and the soft breaking parameters in the squark sector and the slepton sector . since these parameters affect little on the properties of @xmath0 , we fix them to reduce the number of free parameters in our scan . for the squark sector , we adopt the @xmath235 scenario which assumes that the soft mass parameters for the third generation squarks are degenerate : @xmath236 800 gev , and that the trilinear couplings of the third generation squarks are also degenerate , @xmath237 with @xmath238 . for the slepton sector , we assume all the soft - breaking masses and trilinear parameters to be 100 gev . this setting is necessary for the nmssm since this model is difficult to explain the muon anomalous moment at @xmath239 level for heavy sleptons@xcite . finally , we assume the grand unification relation @xmath240 for the gaugino masses with @xmath241 being fine structure constants of the different gauge group . with large number of random points in the scans , we finally get about @xmath242 , @xmath243 , @xmath244 and @xmath242 samples for the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively which survive the constraints and satisfy @xmath245 . analyzing the properties of the @xmath0 indicates that for most of the surviving points in the nmssm and the nmssm , its dominant component is the singlet field ( numerically speaking , @xmath246 ) so that its couplings to the sm fermions are suppressed@xcite . our analysis also indicates that the main decay products of @xmath0 are @xmath247 for the l2hdm@xcite , @xmath248 ( dominant ) and @xmath247 ( subdominant ) for the type - ii 2hdm , the nmssm and the nmssm , and in some rare cases , neutralino pairs in the nmssm@xcite . in fig.[fig4 ] , we project the surviving samples on the @xmath249 plane . this figure shows that the allowed range of @xmath54 is from @xmath250 to @xmath251 in the type - ii 2hdm , and from @xmath252 to @xmath253 in the l2hdm . 
just as we introduced before , the lower bounds of @xmath254 come from the fact that we require the models to explain the muon anomalous moment , while the upper bound is due to we have imposed the constraint from the lep process @xmath255 , which have limited the upper reach of the @xmath256 coupling for light @xmath61 @xcite(for the dependence of @xmath256 coupling on @xmath54 , see sec . this figure also indicates that for the nmssm and the nmssm , @xmath54 is upper bounded by @xmath257 . for the nmssm , this is because large @xmath87 can suppress the dark matter mass to make its annihilation difficult ( see @xcite and also sec . ii ) , but for the nmssm , this is because we choose a light slepton mass so that large @xmath54 can enhance @xmath208 too significantly to be experimentally unacceptable . we checked that for the slepton mass as heavy as @xmath258 , @xmath259 is still allowed for the nmssm . in fig.[fig5 ] and fig.[fig6 ] , we show the branching ratios of @xmath260 and @xmath261 respectively . fig.[fig5 ] indicates , among the four models , the type - ii 2hdm predicts the largest ratio for @xmath260 with its value varying from @xmath262 to @xmath263 . the underlying reason is in the type - ii 2hdm , the @xmath264 coupling is enhanced by @xmath54 ( see fig.[fig4 ] ) , while in the other three model , the coupling is suppressed either by @xmath265 or by the singlet component of the @xmath0 . fig.[fig6 ] shows that the l2hdm predicts the largest rate for @xmath266 with its value reaching @xmath5 in optimum case , and for the other three models , the ratio of @xmath261 is at least about one order smaller than that of @xmath267 . this feature can be easily understood from the @xmath268 coupling introduced in sect . we emphasize that , if the nature prefers a light @xmath0 , @xmath260 and/or @xmath269 in the type - ii 2hdm and the l2hdm will be observable at the gigaz . then by the rates of the two decays , one can determine whether the type - ii 2hdm or the l2hdm is the right theory . on the other hand , if both decays are observed with small rates or fail to be observed , the singlet extensions of the mssm are favored . in fig.[fig7 ] , we show the rate of @xmath3 as the function of @xmath270 . this figure indicates that the branching ratio of @xmath121 can reach @xmath271 , @xmath272 , @xmath273 and @xmath274 for the optimal cases of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively , which implies that the decay @xmath121 will never be observable at the gigaz if the studied model is chosen by nature . the reason for the smallness is , as we pointed out before , that the decay @xmath121 proceeds only at loop level . comparing the optimum cases of the type - ii 2hdm , the nmssm and the nmssm shown in fig.5 - 7 , one may find that the relation @xmath275 holds for any of the decays . this is because the decays are all induced by the yukawa couplings with similar structure for the models . in the supersymmetric models , the large singlet component of the light @xmath0 is to suppress the yukawa couplings , and the @xmath0 in the nmssm has more singlet component than that in the nmssm . next we consider the decay @xmath11 , which , unlike the above decays , depends on the higgs self interactions . in fig.[fig8 ] we plot its rate as a function of @xmath270 and this figure indicates that the @xmath276 may be the largest among the ratios of the exotic @xmath1 decays , reaching @xmath277 in the optimum cases of the type - ii 2hdm , the l2hdm and the nmssm . 
the underlying reason is , in some cases , the intermediate state @xmath119 in fig.[fig3 ] ( a ) may be on - shell . in fact , we find this is one of the main differences between the nmssm and the nmssm , that is , in the nmssm , @xmath119 in fig.[fig3 ] ( a ) may be on - shell ( corresponds to the points with large @xmath278 ) while in the nmssm , this seems impossible . so we conclude that the decay @xmath11 may serve as an alternative channel to test new physics models , especially it may be used to distinguish the nmssm from the nmssm if the supersymmetry is found at the lhc and the @xmath11 is observed at the gigaz with large rate . before we end our discussion , we note that in the nmssm , the higgs boson @xmath0 may be lighter than @xmath279 without conflicting with low energy data from @xmath178 decays and the other observables ( see fig.[fig4]-[fig8 ] ) . in this case , @xmath0 is axion - like as pointed out in @xcite . we checked that , among the rare @xmath1 decays discussed in this paper , the largest branching ratio comes from @xmath280 which can reach @xmath281 . since in this case , the decay product of @xmath0 is highly collinear muon pair , detecting the decay @xmath280 may need some knowledge about detectors , which is beyond our discussion . in this paper , we studied the rare @xmath1-decays @xmath2 ( @xmath7 ) , @xmath282 and @xmath4 in the type - ii 2hdm , lepton - specific 2hdm , nmssm and nmssm , which predict a light cp - odd higgs boson @xmath0 . in the parameter space allowed by current experiments , the branching ratio can be as large as @xmath5 for @xmath118 , @xmath8 for @xmath3 and @xmath9 for @xmath4 , which implies that the decays @xmath2 and @xmath283 may be accessible at the gigaz option . since different models predict different size of branching ratios , these decays can be used to distinguish different model through the measurement of these rare decays . this work was supported in part by hastit under grant no . 2009hastit004 , by the national natural science foundation of china ( nnsfc ) under grant nos . 10821504 , 10725526 , 10635030 , 10775039 , 11075045 and by the project of knowledge innovation program ( pkip ) of chinese academy of sciences under grant no . . for some reviews , see , e.g. , m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod . a * 19 * , 159 ( 2004 ) ; j. m. yang , arxiv:1006.2594 . j. i. illana , m. masip , 67 , 035004 ( 2003 ) ; j. cao , z. xiong , j. m. yang , 32 , 245 ( 2004 ) . d. atwood _ et al_. , 66 , 093005 ( 2002 ) . j. kalinowski , and s. pokorski , 219 , 116 ( 1989 ) ; a. djouadi , p. m. zerwas and j. zunft , 259 , 175 ( 1991 ) ; a. djouadi , j. kalinowski , and p. m. zerwas , z. phys . c * 54 * , 255 ( 1992 ) . m. krawczyk , _ et al . _ , 19 , 463 ( 2001 ) ; 8 , 495 ( 1999 ) . j. f. gunion , g. gamberini and s. f. novaes , 38 , 3481 ( 1988 ) ; thomas j. weiler and tzu - chiang yuan , 318 , 337 ( 1989 ) ; a. djouadi , _ et al . _ , 1 , 163 ( 1998)[hep - ph/9701342 ] . d. chang and w. y. keung , phys . lett . * 77 * , 3732 ( 1996 ) . e. keith and e. ma , 57 , 2017 ( 1998 ) ; m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod.phys . a * 19 * , 159 ( 2004 ) . f. larios , g. tavares - velasco and c. p. yuan , 64 , 055004 ( 2001 ) ; 66 , 075006 ( 2002 ) . a. djouadi , _ et al . _ , 10 , 27 ( 1999 ) [ hep - ph/9903229 ] . for a detailed introduction of the nmssm , see f. franke and h. fraas , int . j. mod . a * 12 * ( 1997 ) 479 ; for a recent review of the nmssm , see for example , u. 
ellwanger , c. hugonie , and a. m. teixeira , arxiv : 0910.1785 . see , e.g. , j. r. ellis , j. f. gunion , h. e. haber , l. roszkowski and f. zwirner , phys . rev . d * 39 * ( 1989 ) 844 ; m. drees , int . j. mod . phys . a * 4 * ( 1989 ) 3635 ; u. ellwanger , m. rausch de traubenberg and c. a. savoy , phys . b * 315 * ( 1993 ) 331 ; nucl . b * 492 * ( 1997 ) 21 ; d.j . miller , r. nevzorov , p.m. zerwas , 681 , 3 ( 2004 ) . c. panagiotakopoulos , k. tamvakis , 446 , 224 ( 1999 ) ; 469 , 145 ( 1999 ) ; c. panagiotakopoulos , a. pilaftsis , 63 , 055003 ( 2001 ) ; a. dedes , _ et al . _ , 63 , 055009 ( 2001 ) ; a. menon , _ et al . _ , 70 , 035005 ( 2004 ) ; v. barger , _ et al . _ , 630 , 85 ( 2005 ) . c. balazs , _ et al . _ , 0706 , 066 ( 2007 ) . b. a. dobrescu , k. t. matchev , 0009 , 031 ( 2000 ) ; a. arhrib , k. cheung , t. j. hou , k. w. song , hep - ph/0611211 ; 0703 , 073 ( 2007 ) ; x. g. he , j. tandean , and g. valencia , 98 , 081802 ( 2007 ) ; 0806 , 002 ( 2008 ) ; f. domingo _ et al_. , 0901 , 061 ( 2009 ) ; gudrun hiller , 70 , 034018 ( 2004 ) ; r. dermisek , and john f. gunion , 75 , 075019 ( 2007 ) ; 79 , 055014 ( 2009 ) ; 81 , 055001 ( 2010 ) ; r. dermisek , john f. gunion , and b. mcelrath , 76 , 051105 ( 2007 ) ; z. heng , _ et al_. , 77 , 095012 ( 2008 ) ; a. belyaev _ et al_. , 81 , 075021 ( 2010 ) ; d. das and u. ellwanger , arxiv:1007.1151 [ hep - ph ] . s. andreas , o. lebedev , s. ramos - sanchez and a. ringwald , arxiv:1005.3978 [ hep - ph ] . j. f. gunion , jhep * 0908 * , 032 ( 2009 ) ; r. dermisek and j. f. gunion , phys . rev . d * 81 * , 075003 ( 2010 ) . r. dermisek and j. f. gunion , phys . lett . * 95 * , 041801 ( 2005 ) ; phys . d * 73 * , 111701 ( 2006 ) . j. cao , h. e. logan , j. m. yang , 79 , 091701 ( 2009 ) . j. cao , p. wan , l. wu , j. m. yang , 80 , 071701 ( 2009 ) . j. f. gunion and h. e. haber , 67 , 075019 ( 2003 ) . r. m. barnett , _ et al . _ , phys . b * 136 * , 191 ( 1984 ) ; r. m. barnett , g. senjanovic and d. wyler , phys . d * 30 * , 1529 ( 1984 ) ; y. grossman , nucl . b * 426 * , 355 ( 1994 ) . h. s. goh , l. j. hall and p. kumar , jhep * 0905 * , 097 ( 2009 ) ; a. g. akeroyd and w. j. stirling , nucl . b * 447 * , 3 ( 1995 ) ; a. g. akeroyd , phys . b * 377 * , 95 ( 1996 ) ; h. e. logan and d. maclennan , phys . rev . d * 79 * , 115022 ( 2009 ) ; m. aoki , _ et al . _ , arxiv:0902.4665 [ hep - ph ] . v. barger , p. langacker , h. s. lee and g. shaughnessy , phys . d * 73 * , 115010 ( 2006 ) . s. hesselbach , _ et . _ , arxiv:0810.0511v2 [ hep - ph ] . de vivie and p. janot [ aleph collaboration ] , pa13 - 027 contribution to the international conference on high energy physics , warsaw , poland , 2531 july 1996 ; j. kurowska , o. grajek and p. zalewski [ delphi collaboration ] , cern - open-99 - 385 . [ aleph collaboration and delphi collaboration and l3 collaboration ] , phys . rept . * 427 * , 257 ( 2006 ) . j. cao and j. m. yang , jhep * 0812 * , 006 ( 2008 ) . m. krawczyk and d. temes , eur . j. c * 44 * , 435 ( 2005 ) . g. altarelli and r. barbieri , 253 , 161 ( 1991 ) ; m. e. peskin , t. takeuchi , 46 , 381 ( 1992 ) . c. amsler , _ et al . _ , ( particle data group ) , 667 , 1 ( 2008 ) . o. deschamps , s. descotes - genon , s. monteil , v. niess , s. tjampens and v. tisserand , arxiv:0907.5135 [ hep - ph ] . s. su and b. thomas , phys . d * 79 * , 095014 ( 2009 ) . g. abbiendi , _ et al . _ , eur . phys . j. c * 32 * , 453 ( 2004 ) . m. davier , _ et al . _ , 66 , 1 ( 2010 ) . k. cheung , _ et al . _ , phys . 
d * 64 * , 111301 ( 2001 ) . k. cheung and o. c. w. kong , phys . d * 68 * , 053003 ( 2003 ) . t. besmer , c. greub , t.hurth , 609 , 359 ( 2001 ) ; f. borzumati , _ et al . _ , 62 , 075005(2000 ) . j. cao , k. i. hikasa , w. wang , j. m. yang and l. x. yu , phys . d * 82 * , 051701 ( 2010 ) [ arxiv:1006.4811 [ hep - ph ] ] . j. f. gunion , _ et . d * 73 * , 015011 ( 2006 ) . martin and j. d. wells , phys . d * 64 * , 035003 ( 2001 ) . j. abdallah _ et al . _ , eur . j. c * 31 * , 421 ( 2004 ) ; g. abbiendi _ et al . _ , eur . j. c * 35 * , 1 ( 2004 ) . j. dunkley _ et al . _ [ wmap collaboration ] , astrophys . j. suppl . * 180 * , 306 ( 2009 ) [ arxiv:0803.0586 [ astro - ph ] ] . u. ellwanger _ et al . _ , 02 , 066 ( 2005 ) . g. belanger , f. boudjema , a. pukhov and a. semenov , comput . commun . * 174 * , 577 ( 2006 ) ; comput . phys . commun . * 176 * , 367 ( 2007 ) . g. belanger , f. boudjema , c. hugonie , a. pukhov and a. semenov , jcap * 0509 * , 001 ( 2005 ) ."""
ARTICLE_MAGNET = r"""it is well known that the classical magnetoresistance ( mr ) in metals or semiconductors with a closed free electron fermi surface increases quadratically with increasing magnetic field @xmath2 for @xmath3 and saturates when @xmath4 . here @xmath5 is the zero - magnetic - field mobility . hence , the extraordinarily high and linear mr ( lmr ) , which breaks this familiar rule , has been gaining much attention as soon as its discovery . in the past decade , this unexpected lmr has been reported in silver chalcogenide,@xcite indium antimonide,@xcite silicon,@xcite mnas - gaas composite material,@xcite and graphene.@xcite kapitza s linear law@xcite indicates that the metal shows a magnetoresistance linear in perpendicular magnetic field when it has an open fermi surface and a mean free path longer than the electronic larmor radius . recently , another two models , irrespective of the open fermi surface , have been constructed to provide possible mechanisms for the lmr phenomenon . abrikosov suggested a quantum - limit origin of lmr for the homogenous system with a gapless linear energy spectrum.@xcite his model requires that landau levels are well formed and the carrier concentration is small that all electrons occupy only the lowest landau band . alternatively , parish and littlewood developed a classical model without involving linear spectrum.@xcite ignoring the concrete microscopic mechanism , they attributed this unusual mr to the mobility fluctuations in a strongly inhomogenous system . topological insulators@xcite ( tis ) are novel materials with a full energy gap in bulk , while there are gapless surface states . due to its unique band structure with only one helical dirac cone and linear energy dispersion,@xcite the surface states of the ti bi@xmath0se@xmath1 become an excellent platform for the study of quantum - limit lmr . the recent experiment in this flat surface system , however , reported that a large positive mr , which becomes very linear above a characteristic field of @xmath6@xmath7@xmath8 t , was observed even in an opposite situation where the carrier sheet density is high that electrons occupy more than one landau levels.@xcite moreover , they found that raising temperature to room temperature almost has no influence on the observed lmr . it is striking that this observation is in conflict with abrikosov s model and also with the classical parish - littlewood model . so far a reliable theoretical scheme capable of explaining this novel experiment has still been lacking . in this paper , we generalize the balance - equation approach@xcite to a system modeling the surface states of a three - dimensional ti to investigate the two - dimensional magnetotransport in it . we find that a positive , nonsaturating and dominantly linear magnetoresistance can appear within quite wide magnetic - field range in the ti surface state having a positive and finite effective g - factor . 
this linear magnetoresistance shows up in the system of high carrier concentration and low mobility when electrons are in extended states and spread over many smeared landau levels , and persists up to room temperature , providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite we consider the surface state of a bi@xmath0se@xmath1-type large bulk gap ti in the @xmath9-@xmath10 plane under the influence of a uniform magnetic field @xmath11 applied along the @xmath12 direction.@xcite following the experimental observation,@xcite we assume that the fermi energy locates in the gap of the bulk band and above the dirac point , i.e. the surface carriers are electrons . further , the separations of the fermi energy from the bottom of bulk band and dirac point are much larger than the highest temperature ( @xmath13 ) considered in this work . hence , the contribution from the bulk band to the magnetotransport is negligible . these electrons , scattered by randomly distributed impurities and by phonons , are driven by a uniform in - plane electric field @xmath14 in the topological surface . the hamiltonian of this many - electron and phonon system consists of an electron part @xmath15 , a phonon part @xmath16 , and electron - impurity and electron - phonon interactions @xmath17 and @xmath18 : @xmath19 here , the electron hamiltonian is taken in the form @xmath20 , \ ] ] in which @xmath21 , @xmath22 , @xmath23 and @xmath24 , stand , respectively , for the canonical momentum , coordinate , momentum and spin operators of the @xmath25th electron having charge @xmath26 , @xmath27 is the vector potential of the perpendicular magnetic field @xmath28 in the landau gauge , @xmath29 is the fermi velocity , @xmath30 is the effective g - factor of the surface electron , and @xmath31 is the bohr magneton with @xmath32 the free electron mass . the sum index @xmath25 in eq.([helectron ] ) goes over all electrons of total number @xmath33 in the surface state of unit area . in the frame work of balance equation approach,@xcite the two - dimensional center - of - mass ( c.m . ) momentum and coordinate @xmath34 and @xmath35 , and the relative - electron momenta and coordinates @xmath36 and @xmath37 are introduced to write the hamiltonian @xmath15 into the sum of a single - particle c.m . part @xmath38 and a many - particle relative - electron part @xmath39 : @xmath40 , with @xmath41.\end{aligned}\ ] ] in this , @xmath42 is the canonical momentum of the center - of - mass and @xmath43 is the canonical momentum for the @xmath25th relative electron . here we have also introduced c.m . spin operators @xmath44 and @xmath45 . the commutation relations between the c.m . spin operators @xmath46 and @xmath47 and the spin operators @xmath48 , @xmath49 and @xmath50 of the @xmath25th electron are of order of @xmath51 : @xmath52= n^{-1}2\,{\rm i}\,\varepsi lon_{\beta_1\beta_2\beta_3}\sigma_j^{\beta_3}$ ] with @xmath53 . therefore , for a macroscopic large @xmath33 system , the c.m . part @xmath38 actually commutes with the relative - electron part @xmath54 in the hamiltonian , i.e. the c.m . motion and the relative motion of electrons are truly separated from each other . the couplings between the two emerge only through the electron impurity and electron phonon interactions . furthermore , the electric field @xmath55 shows up only in @xmath38 . 
and , in view of @xmath56={\rm i}\delta_{\alpha \beta}(\delta_{ij}-1/n)\simeq { \rm i}\delta_{\alpha\beta}\delta_{ij}$ ] , i.e. the relative - electron momenta and coordinates can be treated as canonical conjugate variables , the relative - motion part @xmath54 is just the hamiltonian of @xmath33 electrons in the surface state of ti in the magnetic field without the presence of the electric field . in terms of the c.m . coordinate @xmath57 and the relative electron density operator @xmath58 , the electron impurity and electron phonon interactions can be written as@xcite @xmath59 here @xmath60 and @xmath61 are respectively the impurity potential ( an impurity at randomly distributed position @xmath62 ) and electron phonon coupling matrix element in the plane - wave representation , and @xmath63 with @xmath64 and @xmath65 being the creation and annihilation operators for a phonon of wavevector @xmath66 in branch @xmath67 having frequency @xmath68 . velocity ( operator ) @xmath69 is the time variation of its coordinate : @xmath70= v_{\rm f}(\sigma_{\rm c}^y\ , \hat{i}-\sigma_{\rm c}^x\ , \hat{j})$ ] . to derive a force - balance equation for steady state transport we consider the heisenberg equation for the rate of change of the c.m . canonical momentum @xmath71 : @xmath72= - n e({\bm v}\times { \bm b})- n e{\bm e}+{\bm { f}}_{\rm i}+{\bm { f}}_{\rm p},\ ] ] in which the frictional forces @xmath73 and @xmath74 share the same expressions as given in ref .. the statistical average of the operator equation can be determined to linear order in the electron impurity and electron phonon interactions @xmath17 and @xmath18 with the initial density matrix @xmath75 at temperature @xmath76 when the in - plane electric field @xmath77 is not strong . for steady - transport states we have @xmath78 , leading to a force - balance equation of the form @xmath79 here @xmath80 , the statistically averaged velocity of the moving center - of - mass , is identified as the average rate of change of its position , i.e. the drift velocity of the electron system driven by the electric field @xmath77 , and @xmath81 and @xmath82 are frictional forces experienced by the center - of - mass due to impurity and phonon scatterings : @xmath83,\label{fp}\end{aligned}\ ] ] in which @xmath84 is the bose distribution function , @xmath85 , and @xmath86 stands for the imaginary part of the fourier spectrum of the relative - electron density correlation function defined by @xmath87\big\rangle_{0},\ ] ] where @xmath88 and @xmath89 denotes the statistical averaging over the initial density matrix @xmath90.@xcite the force - balance equation describes the steady - state two - dimensional magnetotransport in the surface state of a ti . note that the frictional forces @xmath81 and @xmath82 are in the opposite direction of the drift velocity @xmath91 and their magnitudes are functions of @xmath92 only . with the drift velocity @xmath93 in the @xmath9 direction , the force - balance equation eq . yields a transverse resistivity @xmath94 , and a longitudinal resistivity @xmath95 . 
the linear one is in the form @xmath96 for calculating the electron density correlation function @xmath97 we proceed in the landau representation.@xcite the landau levels of the single - particle hamiltonian @xmath98 of the relative - electron system in the absence of electric field are composed of a positive `` @xmath99 '' and a negative `` @xmath100 '' branch@xcite @xmath101 with @xmath102 and @xmath103 , and a zero ( @xmath104 ) level @xmath105 the corresponding landau wave functions are @xmath106 and @xmath107 for @xmath108 ; and @xmath109 for @xmath104 . here @xmath110 is the wavevector of the system along @xmath9 direction ; @xmath111 with @xmath112 ; and @xmath113 is the harmonic oscillator eigenfunction with @xmath114 being the hermite polynomial , @xmath115 , and @xmath116 . each landau level contains @xmath117 electron states for system of unit surface area . the positive branch @xmath118 and the @xmath104 level @xmath119 of the above energy spectra are indeed quite close to those of the surface states in the bulk gap of bi@xmath0se@xmath1-family materials derived from microscopic band calculation.@xcite the landau levels are broadened due to impurity , phonon and electron - electron scatterings . we model the imaginary part of the retarded green s function , or the density - of - states , of the broadened landau level @xmath120 ( written for `` + ' ' -branch and @xmath104 levels ) , using a gaussian - type form:@xcite @xmath121,\ ] ] with a half - width @xmath122 of the form:@xcite @xmath123^{1/2}$ ] . here @xmath124 is the single - particle lifetime and @xmath125 is the cyclotron frequency of linear - energy - dispersion system with @xmath126 being the zero - temperature fermi level . using a semi - empirical parameter @xmath127 to relate @xmath124 with the transport scattering time @xmath128 , and expressing @xmath129 with the zero - field mobility @xmath5 at finite temperature,@xcite we can write the landau - level broadening as @xmath130^{1/2}.\ ] ] in the present study we consider the case of @xmath120-doping , i.e. the fermi level is high enough above the energy zero of the dirac cone in the range of `` + ' ' -branch levels and the states of `` @xmath100''-branch levels are completely filled , that they are irrelevant to electron transport . special attention has to be paid to the @xmath104 level , since , depending on the direction of exchange potential the effective g - factor of a ti surface state , @xmath30 , can be positive , zero or negative.@xcite the sign and magnitude of the effective g - factor determines how many states of the zero level should be included in or excluded from the available states for electron occupation in the case of @xmath120-doping at a magnetic field . ( i ) if @xmath131 , the @xmath104 level center is exactly at @xmath132 and the system is electron - hole symmetric . the total number of negative energy states ( including the states of the lower half of the @xmath104 level and states of the @xmath100"-branch levels ) and that of positive energy states ( including the states of the upper half of the @xmath104 level and states of the @xmath99"-branch levels ) do not change when changing magnetic field . therefore , the lower - half negative energy states of this level are always filled and the upper - half positive - energy states of it are available for the occupation of particles which are counted as electrons participating in transport in the case of @xmath120-doping . 
( ii ) for a finite positive @xmath133 , the @xmath104 level @xmath134 moves downward to negative energy and its distance to the nearest @xmath100"-branch level is @xmath135 closer than to the nearest + " -branch level at finite magnetic field strength @xmath2 . this is equivalent to the opening of an increasingly enlarged ( with increasing @xmath2 ) energy gap between the + " -branch states and the states of the zero - level and the @xmath100"-branch levels . the opening of a sufficient energy gap implies that with increasing magnetic field the states in the + " -branch levels would no longer shrink into the zero - level , and thus the @xmath104 level should be completely excluded from the conduction band , i.e. only particles occupying the + " -branch states are counted as electrons participating in transport in the case of @xmath120-doping , when the magnetic field @xmath2 gets larger than a certain value ( depending on the magnitude of @xmath30 ) . ( iii ) for a finite negative @xmath136 , the @xmath104 level @xmath134 moves upward to positive energy and an increasingly enlarged energy gap will be opened between the states of the zero - level and the + " -branch and the states of @xmath100"-branch levels , and particles occupying the @xmath104 level and + " -branch states are electrons participating in transport when the magnetic field @xmath2 gets larger than a certain value . as a result , the experimentally accessible sheet density @xmath33 of electrons participating in transport is related to the fermi energy @xmath137 by the following equation valid at finite @xmath30 for the magnetic field @xmath2 larger than a certain value : @xmath138 in which @xmath139 + 1\}^{-1}$ ] is the fermi distribution function at temperature @xmath76 and the summation index @xmath120 goes over @xmath140 for @xmath133 , or @xmath141 for @xmath136 . in the case of @xmath131 , @xmath142\ ] ] valid for arbitrary magnetic field , in which @xmath143 . the imaginary part of relative - electron density correlation function in the presence of a magnetic field , @xmath86 , can be expressed in the landau representation as@xcite @xmath144 in which the transform factor @xmath145 ^ 2,\end{aligned}\ ] ] with @xmath146 , @xmath147 , @xmath148 , and @xmath149 being associated laguerre polynomials . the landau - representation correlation function @xmath150 in eq.([piqw ] ) can be constructed with the imaginary part of the retarded green s function @xmath151 , or the density - of - states , of the @xmath120th landau level as@xcite @xmath152\nonumber\\ & \hspace{1.2cm}\times{\rm im}g_n(\epsilon+\omega){\rm im}g_{n'}(\epsilon).\end{aligned}\ ] ] the summation indices @xmath120 and @xmath153 in eq.([piqw ] ) are taken over @xmath140 for @xmath133 , or @xmath154 for @xmath136 . in the case of @xmath131 , eq.([piqw ] ) still works and the summation indices @xmath120 and @xmath153 go over @xmath154 but with @xmath155 replaced by @xmath156 in eq.([p2nn ] ) . numerical calculations are performed for the magnetoresistivity @xmath157 of surface state in a uniform ti bi@xmath0se@xmath1 . at zero temperature the elastic scattering contributing to the resistivity is modeled by a coulomb potential due to charged impurities:@xcite @xmath158 with @xmath159 being the impurity density , which is determined by the zero - magnetic - field mobility @xmath5 . at temperatures higher than @xmath160,@xcite phonon scatterings play increasingly important role and the dominant inelastic contribution comes from optical phonons . 
for this polar material , the scattering by optical phonons via the deformation potential can be neglected . hence , we take account of inelastic scattering from optical phonons via frhlich coupling : @xmath161 . in the numerical calculation we use the following parameters:@xcite fermi velocity @xmath162 , static dielectric constant @xmath163 , optical dielectric constant @xmath164 , and phonon energy @xmath165 . the broadening parameter is taken to be @xmath166 . as a function of the magnetic field @xmath2 having different effective g - factors : @xmath167 and @xmath168 for a ti surface system with electron sheet density @xmath169 in the cases of zero - magnetic - field mobility @xmath170 ( a ) and @xmath171 ( b ) . several integer - number positions of filling factor @xmath172 are marked in ( b).,scaledwidth=40.0% ] fig.[diffg ] shows the calculated magnetoresistivity @xmath157 versus the magnetic field strength @xmath2 for a ti surface system with electron sheet density @xmath169 but having different effective g - factors : @xmath167 and @xmath168 for two values of zero - magnetic - field mobility @xmath170 and @xmath171 , representing different degree of landau - level broadening . in the case without zeeman splitting ( @xmath131 ) the resistivity @xmath157 exhibits almost no change with changing magnetic field up to 10 t , except the shubnikov - de haas ( sdh ) oscillation showing up in the case of @xmath171 . this kind of magnetoresistance behavior was indeed seen experimentally in the electron - hole symmetrical massless system of single - layer graphene.@xcite in the case of a positive g - factor , @xmath173 , the magnetoresistivity increases linearly with increasing magnetic field ; while for a negative g - factor , @xmath174 , the magnetoresistivity decreases linearly with increasing magnetic field . is shown as a function of the magnetic field @xmath2 for different values of zero - magnetic - field mobility : ( a ) @xmath175 , ( b ) @xmath176 , ( c ) @xmath177 , ( d ) @xmath178 , ( e ) @xmath179 , and ( f ) @xmath180 . the inset of ( a ) illustrates the same for a larger magnetic - field range @xmath181 . the filling factor @xmath182 is plotted versus the magnetic field in ( f ) ; and several integer - number positions of @xmath182 are also marked in ( d ) and ( e ) . here the surface electron density @xmath169 and the lattice temperature @xmath183.,scaledwidth=47.0% ] in the following we will give more detailed examination on the linearly increasing magnetoresistance in the positive @xmath30 case . fig.[rhob ] shows the calculated resistivity @xmath157 versus the magnetic field strength @xmath2 at lattice temperature @xmath183 for system of carrier sheet density @xmath169 and @xmath173 , having different zero - field mobility @xmath184 and @xmath180 . all resistivity curves for mobility @xmath185 exhibit clear linearity in the magnetic - field range and appear no tendency of saturation at the highest field shown in the figure . especially , for the case @xmath170 , the linear behavior extends even up to the magnetic field of @xmath186 , as illustrated in the inset of fig.[rhob](a ) . this feature contradicts the classical mr which saturates at sufficiently large magnetic field @xmath187 . 
note that here we only present the calculated @xmath157 for magnetic field @xmath2 larger than @xmath188 t , for which a sufficient energy gap @xmath135 is assumed to open that with further increase of the magnetic field the states in the `` + ' ' -branch levels no longer shrink into the zero level and thus it should be excluded from the conduction band . this is of course not true for very weak magnetic field . when @xmath189 the energy gap @xmath190 , the situation becomes similar to the case of @xmath131 : the whole upper half of the zero - level states are available to electron occupation and we should have a flat resistivity @xmath157 when changing magnetic field . with increasing @xmath2 the portion of the zero - level states available to conduction electrons decreases until the magnetic field reaches @xmath191 . as a result the resistivity @xmath157 should exhibit a crossover from a flat changing at small @xmath2 to positively linear increasing at @xmath192 . this is just the behavior observed in the ti bi@xmath0se@xmath1.@xcite note that in the case of @xmath170 , the broadened landau - level widths are always larger than the neighboring level interval : @xmath193 , which requires @xmath194 ^ 2 $ ] , even for the lowest landau level @xmath195 , i.e. the whole landau - level spectrum is smeared . with increasing the zero - field mobility the magnitude of resistivity @xmath157 decreases , and when the broadened landau - level width becomes smaller than the neighboring level interval , @xmath196 , a weak sdh oscillation begin to occur around the linearly - dependent average value of @xmath157 at higher portion of the magnetic field range , as seen in fig.[rhob](c ) , ( d ) and ( e ) for @xmath197 and @xmath198 . on the other hand , in the case of large mobility , e.g. @xmath199 , where the broadened landau - level widths @xmath200 are much smaller than the neighboring level interval even for level index @xmath120 as large as @xmath201 , the magnetoresistivity shows pronounced sdh oscillation and the linear - dependent behavior disappears , before the appearance of quantum hall effect,@xcite as shown in fig.[rhob](f ) . abrikosov s model for the lmr requires the applied magnetic field large enough to reach the quantum limit at which all the carriers are within the lowest landau level,@xcite while it is obvious that more than one landau levels are occupied in the experimental samples in the field range in which the linear and non - saturating magnetoresistivity was observed.@xcite for the given electron surface density @xmath202 , the number of occupied landau levels , or the filling factor @xmath172 , at different magnetic fields is shown in fig.[rhob](f ) , as well as in the fig.[rhob](d ) and ( e ) , where the integer - number positions of @xmath203 , i.e. filling up to entire @xmath182 landau levels , coincide with the minima of the density - of - states or the dips of sdh oscillation . this is in contrast with @xmath131 case , where the integer number of @xmath203 , which implies a filling up to the center position of the @xmath182th landau levels , locates at a peak of sdh oscillation , as shown in fig.[diffg]b . the observed sdh oscillations in the bi@xmath0se@xmath1 nanoribbon exhibiting nonsaturating surface lmr in the experiment@xcite favor the former case : a finite positive effective @xmath133 . 
is plotted as a function of the surface electron density @xmath33 at magnetic field @xmath204 : ( a ) at different values of zero - field mobility @xmath5 , and ( b ) at different values of zero - field conductivity @xmath205.,scaledwidth=40.0% ] at various lattice temperatures . here the zero - magnetic - field mobility at zero temperature is @xmath206.,scaledwidth=35.0% ] next , we examine the density - dependence of the linear magnetoresistivity . to compare with abrikosov s quantum magnetoresistance which suggests a @xmath207 behavior,@xcite we show the calculated @xmath208 for above lmr versus the carrier sheet density @xmath33 in fig.[rhon ] at fixed magnetic field @xmath209 t . the mobility is taken respectively to be @xmath210 and @xmath211m@xmath212/vs to make the resistivity in the lmr regime . a clearly linear dependence of @xmath213 on the surface density @xmath33 is seen in all cases , indicating that this non - saturating linear resistivity is almost inversely proportional to the carrier density . in the figure we also show @xmath208 versus @xmath33 under the condition of different given conductivity @xmath214 and @xmath215 . in this case the half - width @xmath216 is independent of surface density . the linear dependence still holds , indicating that this linear behavior is not sensitive to the modest @xmath33-dependence of landau level broadening @xmath216 as long as the system is in the overlapped landau level regime . from the above discussion , it is obvious that lmr shows up in the system having overlapped landau levels and the separation of landau levels makes the mr departure from the linear increase . at high temperature , the thermal energy would smear the level separation and phonon scatterings further broaden landau levels . hence , it is believed that this lmr will be robust against raising temperature . this is indeed the case as seen in fig.[rhot ] , where we plot the calculated magnetoresistivity @xmath157 for the above system with zero - temperature linear mobility @xmath217m@xmath212/vs versus the magnetic field at different lattice temperatures . we can see that raising temperature to room temperature has little effect on the linearity of mr . due to the decreased mobility at higher temperature from phonon scattering , the weak sdh oscillation on the linear background tends to vanish . these features are in good agreement with the experimental report.@xcite in summary , we have studied the two - dimensional magnetotransport in the flat surface of a three - dimensional ti , which arises from the surface states with a wavevector - linear energy dispersion and a finite , positive zeeman splitting within the bulk energy gap . when the level broadening is comparable to or larger than the landau - level separation and the conduction electrons spread over many landau levels , a positive , dominantly linear and non - saturating magnetoresistance appears within a quite wide range of magnetic field and persists up to room temperature . 
this remarkable lmr provides a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite in contrast to quantum hall effect which appears in the case of well formed landau levels and to abrikosov s quantum magnetotransport,@xcite which is limited to the extreme quantum limit that all electrons coalesce into the lowest landau level , the discussed lmr is a phenomena of pure classical two - dimensional magnetotransport in a system having linear - energy - dispersion , appearing in the regime of overlapped landau levels , irrespective of its showing up in relatively high magnetic field range . furthermore , the present scheme deals with spatially uniform case without invoking the mobility fluctuation in a strongly inhomogeneous system , which is required in the classical parish and littlewood model to produce a lmr.@xcite the appearance of this significant positive - increasing linear magnetoresistance depends on the existence of a positive and sizable effective g - factor . if the zeeman energy splitting is quite small the resistivity @xmath157 would exhibit little change with changing magnetic field . in the case of a negative and sizable effective g - factor the magnetoresistivity would decrease linearly with increasing magnetic field . therefore , the behavior of the longitudinal resistivity versus magnetic field may provide a useful way for judging the direction and the size of the effective zeeman energy splitting in ti surface states . this work was supported by the national science foundation of china ( grant no . 11104002 ) , the national basic research program of china ( grant no . 2012cb927403 ) and by the program for science&technology innovation talents in universities of henan province ( grant no . 2012hastit029 ) ."""
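        # Summarize both articles in a single batch and compare the decoded outputs against the reference summaries below.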
inputs = tokenizer(
[ARTICLE_LEP, ARTICLE_MAGNET],
max_length=1024,
padding="max_length",
truncation=True,
return_tensors="pt",
)
inputs = {k: inputs[k].to(torch_device) for k in inputs}
hypotheses_batch = model.generate(**inputs)
EXPECTED_LEP = (
"we study the rare decays @xmath0 ( @xmath1 ) at the gigaz option of the international linear collider "
"( ilc ).<n> we calculate the branching ratios of @xmath2 in the two higgs doublet model ( 2hdm ), the "
"minimal supersymmetric standard model ( mssm ), the next - to - minimal supersymmetric standard model "
"( nmssm ) and the nearly minimal supersymmetric standard model ( nmssm ).<n> we find that the branching "
"ratios of @xmath3 can reach @xmath4 in 2hdm, @xmath5 in mssm, @xmath6 in nmssm and @xmath7 in nmssm, "
"while they are much smaller than @xmath8 in 2hdm, @xmath9 in mssm, @xmath10 in nmssm and @xmath11 in "
"nmssm."
)
EXPECTED_MAGNET = (
"we investigate the two - dimensional magnetotransport in the surface state of a topological insulator "
"( ti ).<n> we find that a positive, nonsaturating and dominantly linear magnetoresistance can appear "
"within quite wide magnetic - field range in the ti surface state having a positive and finite effective g "
"- factor.<n> this linear magnetoresistance shows up in the system of high carrier concentration and low "
"mobility when electrons are in extended states and spread over many smeared landau levels, and persists "
"up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance "
"in topological insulator bi@xmath0se@xmath1 nanoribbons."
)
generated = tokenizer.batch_decode(
hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
)
self.assertTrue(generated == [EXPECTED_LEP, EXPECTED_MAGNET])
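# Tester for the standalone (decoder-only) use of BigBirdPegasus, mirroring the shared decoder test setup.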
class BigBirdPegasusStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=7,
d_model=32,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
attention_type="original_full",
use_bias=True,
block_size=16,
num_random_blocks=3,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = BigBirdPegasusConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
attention_type=self.attention_type,
use_bias=self.use_bias,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
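        # use_cache defaults to the config value (True here), so the first two calls return the same number of
        # outputs; disabling the cache drops the past_key_values entry, hence one fewer output below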
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create a hypothetical next token and extend next_input_ids with it
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append the new token to input_ids to form next_input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
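        # with the cache, only the newly created token is passed in; its hidden state must match the
        # full-sequence run at the same position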
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create a hypothetical next token and extend next_input_ids with it
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random token in the masked part of input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
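        # the altered positions fall in the masked-out half of the sequence, so the cached and
        # full-sequence outputs should still agree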
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
        # BigBird produces extremely large logits, which requires
        # such a high error tolerance here
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=5e-1)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, lm_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class BigBirdPegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BigBirdPegasusDecoder, BigBirdPegasusForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (BigBirdPegasusForCausalLM,) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = BigBirdPegasusStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/biogpt/test_modeling_biogpt.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BioGPT model. """
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BioGptForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_biogpt_model_attention_mask_past(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create a hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_biogpt_model_past_large_inputs(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = BioGptModel(config=config).to(torch_device).eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
):
model = BioGptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_biogpt_weight_initialization(self, config, *args):
model = BioGptModel(config)
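        # weights whose name contains "c_proj" are expected to use std = initializer_range / sqrt(2 * num_hidden_layers)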
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def create_and_check_biogpt_for_token_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
def setUp(self):
self.model_tester = BioGptModelTester(self)
self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
def test_biogpt_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_biogpt_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
def test_biogpt_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
def test_biogpt_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def test_batch_generation(self):
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
tokenizer.padding_side = "left"
        # Use the EOS token as the PAD token for left-padded batch generation
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
"Hello, my dog is a little",
"Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
outputs = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
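        # number of padding tokens that the batched call added to the shorter (last) sentence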
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained(self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BioGptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common
def test_biogpt_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
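        # token id 1 is treated as padding when building the attention mask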
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model_for_multi_label with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common
def test_biogpt_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_lm_head_model(self):
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
output = model(input_ids)[0]
vocab_size = 42384
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_biogpt_generation(self):
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
output_ids = model.generate(
**tokenized,
min_length=100,
max_length=1024,
num_beams=5,
early_stopping=True,
)
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
EXPECTED_OUTPUT_STR = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/biogpt/test_tokenization_biogpt.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BioGptTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
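        # toy BPE merge rules: a merge pair followed by a count-like column, with a trailing empty entry for the final newline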
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 0 |
hf_public_repos/transformers/tests/models | hf_public_repos/transformers/tests/models/visual_bert/test_modeling_visual_bert.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch VisualBERT model. """
import copy
import unittest
from transformers import VisualBertConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForRegionToPhraseAlignment,
VisualBertForVisualReasoning,
VisualBertModel,
)
from transformers.models.visual_bert.modeling_visual_bert import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
class VisualBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
visual_seq_length=5,
is_training=True,
use_attention_mask=True,
use_visual_attention_mask=True,
use_token_type_ids=True,
use_visual_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
visual_embedding_dim=20,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.visual_seq_length = visual_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_visual_attention_mask = use_visual_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_visual_token_type_ids = use_visual_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.visual_embedding_dim = visual_embedding_dim
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def get_config(self):
return VisualBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
visual_embedding_dim=self.visual_embedding_dim,
num_labels=self.num_labels,
is_decoder=False,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
visual_embeds = floats_tensor([self.batch_size, self.visual_seq_length, self.visual_embedding_dim])
attention_mask = None
if self.use_attention_mask:
attention_mask = torch.ones((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
visual_attention_mask = None
if self.use_visual_attention_mask:
visual_attention_mask = torch.ones(
(self.batch_size, self.visual_seq_length), dtype=torch.long, device=torch_device
)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
visual_token_type_ids = None
if self.use_visual_token_type_ids:
visual_token_type_ids = ids_tensor([self.batch_size, self.visual_seq_length], self.type_vocab_size)
config = self.get_config()
return config, {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
}
def prepare_config_and_inputs_for_pretraining(self):
masked_lm_labels = None
sentence_image_labels = None
if self.use_labels:
masked_lm_labels = ids_tensor([self.batch_size, self.seq_length + self.visual_seq_length], self.vocab_size)
sentence_image_labels = ids_tensor(
[self.batch_size],
self.type_sequence_label_size,
)
config, input_dict = self.prepare_config_and_inputs_for_common()
input_dict.update({"labels": masked_lm_labels, "sentence_image_labels": sentence_image_labels})
return config, input_dict
def prepare_config_and_inputs_for_multiple_choice(self):
input_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.vocab_size)
visual_embeds = floats_tensor(
[self.batch_size, self.num_choices, self.visual_seq_length, self.visual_embedding_dim]
)
attention_mask = None
if self.use_attention_mask:
attention_mask = torch.ones(
(self.batch_size, self.num_choices, self.seq_length), dtype=torch.long, device=torch_device
)
visual_attention_mask = None
if self.use_visual_attention_mask:
visual_attention_mask = torch.ones(
(self.batch_size, self.num_choices, self.visual_seq_length), dtype=torch.long, device=torch_device
)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.num_choices, self.seq_length], self.type_vocab_size)
visual_token_type_ids = None
if self.use_visual_token_type_ids:
visual_token_type_ids = ids_tensor(
[self.batch_size, self.num_choices, self.visual_seq_length], self.type_vocab_size
)
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask,
"labels": labels,
}
def prepare_config_and_inputs_for_vqa(self):
vqa_labels = None
if self.use_labels:
vqa_labels = floats_tensor([self.batch_size, self.num_labels])
config, input_dict = self.prepare_config_and_inputs_for_common()
input_dict.update({"labels": vqa_labels})
return config, input_dict
def prepare_config_and_inputs_for_nlvr(self):
nlvr_labels = None
if self.use_labels:
nlvr_labels = ids_tensor([self.batch_size], self.num_labels)
config, input_dict = self.prepare_config_and_inputs_for_common()
input_dict.update({"labels": nlvr_labels})
return config, input_dict
def prepare_config_and_inputs_for_flickr(self):
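        # map each text token to a (random) visual region index; the visual part of the sequence is filled with -1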
region_to_phrase_position = torch.cat(
(
ids_tensor([self.batch_size, self.seq_length], self.visual_seq_length),
torch.ones(self.batch_size, self.visual_seq_length, dtype=torch.long, device=torch_device) * -1,
),
dim=-1,
)
flickr_labels = None
if self.use_labels:
flickr_labels = floats_tensor(
[self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length]
)
config, input_dict = self.prepare_config_and_inputs_for_common()
input_dict.update({"region_to_phrase_position": region_to_phrase_position, "labels": flickr_labels})
return config, input_dict
def create_and_check_model(self, config, input_dict):
model = VisualBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.seq_length + self.visual_seq_length, self.hidden_size),
)
def create_and_check_for_pretraining(self, config, input_dict):
model = VisualBertForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(
result.prediction_logits.shape,
(self.batch_size, self.seq_length + self.visual_seq_length, self.vocab_size),
)
def create_and_check_for_vqa(self, config, input_dict):
model = VisualBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_dict):
model = VisualBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_nlvr(self, config, input_dict):
model = VisualBertForVisualReasoning(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_flickr(self, config, input_dict):
model = VisualBertForRegionToPhraseAlignment(config=config)
model.to(torch_device)
model.eval()
result = model(**input_dict)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.seq_length + self.visual_seq_length, self.visual_seq_length)
)
@require_torch
class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
VisualBertModel,
VisualBertForMultipleChoice,
VisualBertForVisualReasoning,
VisualBertForRegionToPhraseAlignment,
VisualBertForQuestionAnswering,
VisualBertForPreTraining,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {}
test_torchscript = False
test_pruning = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class == VisualBertForMultipleChoice:
for key in inputs_dict.keys():
value = inputs_dict[key]
if isinstance(value, torch.Tensor) and value.ndim > 1:
if key != "visual_embeds":
inputs_dict[key] = (
inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
)
else:
inputs_dict[key] = (
inputs_dict[key]
.unsqueeze(1)
.expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim)
.contiguous()
)
elif model_class == VisualBertForRegionToPhraseAlignment:
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
batch_size = self.model_tester.batch_size
inputs_dict["region_to_phrase_position"] = torch.zeros(
(batch_size, total_length),
dtype=torch.long,
device=torch_device,
)
if return_labels:
if model_class == VisualBertForMultipleChoice:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class == VisualBertForPreTraining:
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
batch_size = self.model_tester.batch_size
inputs_dict["labels"] = torch.zeros(
(batch_size, total_length),
dtype=torch.long,
device=torch_device,
)
inputs_dict["sentence_image_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
# Flickr expects float labels
elif model_class == VisualBertForRegionToPhraseAlignment:
batch_size = self.model_tester.batch_size
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
inputs_dict["labels"] = torch.ones(
(
batch_size,
total_length,
self.model_tester.visual_seq_length,
),
dtype=torch.float,
device=torch_device,
)
# VQA expects float labels
elif model_class == VisualBertForQuestionAnswering:
inputs_dict["labels"] = torch.ones(
(self.model_tester.batch_size, self.model_tester.num_labels),
dtype=torch.float,
device=torch_device,
)
elif model_class == VisualBertForVisualReasoning:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = VisualBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
visual_seq_len = getattr(self.model_tester, "visual_seq_length", None)
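        # VisualBERT attends over the concatenation of text and visual tokens, so both lengths contribute to the attention size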
encoder_seq_length = (seq_len if seq_len is not None else 0) + (
visual_seq_len if visual_seq_len is not None else 0
)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
seq_length = seq_length * self.model_tester.chunk_length
else:
seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_model_for_vqa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa()
self.model_tester.create_and_check_for_vqa(*config_and_inputs)
def test_model_for_nlvr(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_nlvr()
self.model_tester.create_and_check_for_nlvr(*config_and_inputs)
def test_model_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_multiple_choice()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_model_for_flickr(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr()
self.model_tester.create_and_check_for_flickr(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = VisualBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@require_torch
class VisualBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_vqa_coco_pre(self):
model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1)
token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1)
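        # dummy visual inputs: 10 region embeddings of dimension 2048 filled with a constant value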
visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5
visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
attention_mask = torch.tensor([1] * 6).reshape(1, -1)
visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
with torch.no_grad():
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
)
vocab_size = 30522
expected_shape = torch.Size((1, 16, vocab_size))
self.assertEqual(output.prediction_logits.shape, expected_shape)
expected_slice = torch.tensor(
[[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]]
)
self.assertTrue(torch.allclose(output.prediction_logits[:, :3, :3], expected_slice, atol=1e-4))
expected_shape_2 = torch.Size((1, 2))
self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2)
expected_slice_2 = torch.tensor([[0.7393, 0.1754]])
self.assertTrue(torch.allclose(output.seq_relationship_logits, expected_slice_2, atol=1e-4))
@slow
def test_inference_vqa(self):
model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")
input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1)
token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1)
visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5
visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
attention_mask = torch.tensor([1] * 6).reshape(1, -1)
visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
with torch.no_grad():
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
)
# vocab_size = 30522
expected_shape = torch.Size((1, 3129))
self.assertEqual(output.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]]
)
self.assertTrue(torch.allclose(output.logits[:, :10], expected_slice, atol=1e-4))
@slow
def test_inference_nlvr(self):
model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2")
input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1)
token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1)
visual_embeds = torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5
visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long)
attention_mask = torch.tensor([1] * 6).reshape(1, -1)
visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1)
with torch.no_grad():
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
)
# vocab_size = 30522
expected_shape = torch.Size((1, 2))
self.assertEqual(output.logits.shape, expected_shape)
expected_slice = torch.tensor([[-1.1436, 0.8900]])
self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
@slow
def test_inference_vcr(self):
model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr")
input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long)
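        # 4 answer choices, each sharing the same 6-token text prompt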
attention_mask = torch.ones_like(input_ids)
token_type_ids = torch.ones_like(input_ids)
visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5
visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long)
visual_attention_mask = torch.ones_like(visual_token_type_ids)
with torch.no_grad():
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
visual_embeds=visual_embeds,
visual_attention_mask=visual_attention_mask,
visual_token_type_ids=visual_token_type_ids,
)
# vocab_size = 30522
expected_shape = torch.Size((1, 4))
self.assertEqual(output.logits.shape, expected_shape)
expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]])
self.assertTrue(torch.allclose(output.logits, expected_slice, atol=1e-4))
| 0 |