Columns:
repo_id: string, length 15 to 89
file_path: string, length 27 to 180
content: string, length 1 to 2.23M
__index_level_0__: int64, range 0 to 0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_agent_types.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
0
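A minimal sketch of the round-trip pattern the tests above rely on: an AgentAudio built from a tensor serializes itself to a temporary .wav path via to_string() and is recovered with to_raw(). The AgentAudio API is taken from the test file itself; a torch and soundfile installation is assumed.

import torch
from transformers.tools.agent_types import AgentAudio

# Build an agent type from raw audio samples (API surface as used in the tests above)
waveform = torch.rand(12, dtype=torch.float64) - 0.5
audio = AgentAudio(waveform)

wav_path = audio.to_string()   # path of the temporary .wav file the object wrote
recovered = audio.to_raw()     # tensor equal to `waveform` up to I/O precision
print(wav_path, torch.allclose(waveform, recovered, atol=1e-4))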
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tools/test_text_summarization.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextSummarizationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("summarization")
        self.tool.setup()
        self.remote_tool = load_tool("summarization", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT)
        self.assertEqual(
            result,
            "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
        )

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT)
        self.assertEqual(
            result,
            "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
        )

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT)
        self.assertEqual(
            result,
            "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
        )

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT)
        self.assertEqual(
            result,
            "Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf. In March 2021, Hugging Face raised $40 million in a Series B funding round. On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model. In 2022, the workshop concluded with the announcement of BLOOM.",
        )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_activations.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")

        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_file_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


# Dummy contexts to test `ContextManagers`
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
0
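The ContextManagers tests above check that a list of context managers is entered in order and exited in reverse, producing the nested Bonjour!/Welcome!/Bye!/Au revoir! output. A minimal sketch of an equivalent helper built on contextlib.ExitStack follows; it mirrors the behavior asserted by the tests and is not the transformers implementation.

from contextlib import ExitStack, contextmanager

class StackedContexts:
    """Enter a list of context managers in order, exit them in reverse (behavior inferred from the tests)."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for cm in self.context_managers:
            self.stack.enter_context(cm)

    def __exit__(self, *exc_info):
        self.stack.close()

@contextmanager
def greet(hello, bye):
    print(hello)
    yield
    print(bye)

with StackedContexts([greet("Bonjour!", "Au revoir!"), greet("Welcome!", "Bye!")]):
    print("Transformers are awesome!")
# Prints: Bonjour!, Welcome!, Transformers are awesome!, Bye!, Au revoir!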
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_activations_tf.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers.activations_tf import get_tf_activation


@require_tf
class TestTFActivations(unittest.TestCase):
    def test_gelu_10(self):
        x = tf.constant([-100, -1.0, -0.1, 0, 0.1, 1.0, 100.0])
        gelu = get_tf_activation("gelu")
        gelu10 = get_tf_activation("gelu_10")

        y_gelu = gelu(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = tf.where(y_gelu_10 < 10.0, 1.0, 0.0)

        self.assertEqual(tf.math.reduce_max(y_gelu_10).numpy().item(), 10.0)
        self.assertTrue(np.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_tf_activation("gelu")
        get_tf_activation("gelu_10")
        get_tf_activation("gelu_fast")
        get_tf_activation("gelu_new")
        get_tf_activation("glu")
        get_tf_activation("mish")
        get_tf_activation("quick_gelu")
        get_tf_activation("relu")
        get_tf_activation("sigmoid")
        get_tf_activation("silu")
        get_tf_activation("swish")
        get_tf_activation("tanh")

        with self.assertRaises(KeyError):
            get_tf_activation("bogus")
        with self.assertRaises(KeyError):
            get_tf_activation(None)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_offline.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_backbone_utils.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
0
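The alignment rules that test_get_aligned_output_features_output_indices asserts can be summarized in a small sketch. The logic below is inferred from the expected outputs in the test (both None defaults to the last stage, features alone derive indices by position, indices alone derive names by indexing); it is an illustration, not the transformers implementation.

# Minimal sketch of the alignment rules asserted by the test above (assumption, not library code)
def align_features_and_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        # Default to the last stage
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_indices is None:
        # Derive indices from the positions of the requested stage names
        return out_features, [stage_names.index(name) for name in out_features]
    if out_features is None:
        # Derive names by indexing stage_names; negative indices are kept as given
        return [stage_names[idx] for idx in out_indices], list(out_indices)
    return out_features, list(out_indices)

print(align_features_and_indices(None, [-3, -1], ["a", "b", "c"]))  # (['a', 'c'], [-3, -1])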
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_model_output.py
# coding=utf-8
# Copyright 2020 The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from dataclasses import dataclass
from typing import Optional

from transformers.testing_utils import require_torch
from transformers.utils import ModelOutput


@dataclass
class ModelOutputTest(ModelOutput):
    a: float
    b: Optional[float] = None
    c: Optional[float] = None


class ModelOutputTester(unittest.TestCase):
    def test_get_attributes(self):
        x = ModelOutputTest(a=30)
        self.assertEqual(x.a, 30)
        self.assertIsNone(x.b)
        self.assertIsNone(x.c)
        with self.assertRaises(AttributeError):
            _ = x.d

    def test_index_with_ints_and_slices(self):
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))

    def test_index_with_strings(self):
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x["a"], 30)
        self.assertEqual(x["b"], 10)
        with self.assertRaises(KeyError):
            _ = x["c"]

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x["a"], 30)
        self.assertEqual(x["c"], 10)
        with self.assertRaises(KeyError):
            _ = x["b"]

    def test_dict_like_properties(self):
        x = ModelOutputTest(a=30)
        self.assertEqual(list(x.keys()), ["a"])
        self.assertEqual(list(x.values()), [30])
        self.assertEqual(list(x.items()), [("a", 30)])
        self.assertEqual(list(x), ["a"])

        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [("a", 30), ("b", 10)])
        self.assertEqual(list(x), ["a", "b"])

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(list(x.keys()), ["a", "c"])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [("a", 30), ("c", 10)])
        self.assertEqual(list(x), ["a", "c"])

        with self.assertRaises(Exception):
            x = x.update({"d": 20})
        with self.assertRaises(Exception):
            del x["a"]
        with self.assertRaises(Exception):
            _ = x.pop("a")
        with self.assertRaises(Exception):
            _ = x.setdefault("d", 32)

    def test_set_attributes(self):
        x = ModelOutputTest(a=30)
        x.a = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x["a"], 10)

    def test_set_keys(self):
        x = ModelOutputTest(a=30)
        x["a"] = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x["a"], 10)

    def test_instantiate_from_dict(self):
        x = ModelOutputTest({"a": 30, "b": 10})
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(x.a, 30)
        self.assertEqual(x.b, 10)

    def test_instantiate_from_iterator(self):
        x = ModelOutputTest([("a", 30), ("b", 10)])
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(x.a, 30)
        self.assertEqual(x.b, 10)

        with self.assertRaises(ValueError):
            _ = ModelOutputTest([("a", 30), (10, 10)])

        x = ModelOutputTest(a=(30, 30))
        self.assertEqual(list(x.keys()), ["a"])
        self.assertEqual(x.a, (30, 30))

    @require_torch
    def test_torch_pytree(self):
        # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves)
        # this is important for DistributedDataParallel gradient synchronization with static_graph=True
        import torch.utils._pytree as pytree

        x = ModelOutput({"a": 1.0, "c": 2.0})
        self.assertFalse(pytree._is_leaf(x))

        x = ModelOutputTest(a=1.0, c=2.0)
        self.assertFalse(pytree._is_leaf(x))

        expected_flat_outs = [1.0, 2.0]
        expected_tree_spec = pytree.TreeSpec(
            ModelOutputTest, (ModelOutputTest, ["a", "c"]), [pytree.LeafSpec(), pytree.LeafSpec()]
        )

        actual_flat_outs, actual_tree_spec = pytree.tree_flatten(x)
        self.assertEqual(expected_flat_outs, actual_flat_outs)
        self.assertEqual(expected_tree_spec, actual_tree_spec)

        unflattened_x = pytree.tree_unflatten(actual_flat_outs, actual_tree_spec)
        self.assertEqual(x, unflattened_x)


class ModelOutputTestNoDataclass(ModelOutput):
    """Invalid test subclass of ModelOutput where @dataclass decorator is not used"""

    a: float
    b: Optional[float] = None
    c: Optional[float] = None


class ModelOutputSubclassTester(unittest.TestCase):
    def test_direct_model_output(self):
        # Check that direct usage of ModelOutput instantiates without errors
        ModelOutput({"a": 1.1})

    def test_subclass_no_dataclass(self):
        # Check that a subclass of ModelOutput without @dataclass is invalid
        # A valid subclass is inherently tested other unit tests above.
        with self.assertRaises(TypeError):
            ModelOutputTestNoDataclass(a=1.1, b=2.2, c=3.3)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/tiny_model_summary.json
{ "ASTForAudioClassification": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTForAudioClassification" ], "sha": "83d6e076db7768a3645401bad3204624985e1d08" }, "ASTModel": { "tokenizer_classes": [], "processor_classes": [ "ASTFeatureExtractor" ], "model_classes": [ "ASTModel" ], "sha": "75e68f956f6f2c0709b01e596e7a6aecb1b29dce" }, "AlbertForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMaskedLM", "TFAlbertForMaskedLM" ], "sha": "d29de71ac29e1019c3a7762f7357f750730cb037" }, "AlbertForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForMultipleChoice", "TFAlbertForMultipleChoice" ], "sha": "242aecce6a589a2964c0f695621fa22a83751579" }, "AlbertForPreTraining": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForPreTraining", "TFAlbertForPreTraining" ], "sha": "41330be4b271687f4d88ddc96346c12aa11de983" }, "AlbertForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForQuestionAnswering", "TFAlbertForQuestionAnswering" ], "sha": "040b81c15f437f4722349dc5b41fccd17ebd7fdc" }, "AlbertForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForSequenceClassification", "TFAlbertForSequenceClassification" ], "sha": "39c1a0e2c1c2623106d3211d751e9b32f23a91a0" }, "AlbertForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertForTokenClassification", "TFAlbertForTokenClassification" ], "sha": "359c3f4a311a4053a6f6d6a880db5f82c8e3ff1f" }, "AlbertModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "AlbertModel", "TFAlbertModel" ], "sha": "34a63314686b64aaeb595ddb95006f1ff2ffda17" }, "AlignModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "AlignModel" ], "sha": "68a4f9d3f493f44efa7c1dde6fcca23350e2c92b" }, "AltCLIPModel": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "AltCLIPModel" ], "sha": "3106af0fd503970717c05f27218e5cacf19ba872" }, "BarkModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BarkModel" ], "sha": "187e590fd87359cea47693e8cb11a604cd7b673c" }, "BartForCausalLM": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForCausalLM" ], "sha": "c25526ac67d2dbe79fe5462af4b7908ca2fbc3ff" }, "BartForConditionalGeneration": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForConditionalGeneration", "TFBartForConditionalGeneration" ], "sha": "3a489a21e4b04705f4a6047924b7616a67be7e37" }, "BartForQuestionAnswering": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartForQuestionAnswering" ], "sha": "3ebf9aab39a57ceab55128d5fc6f61e4db0dadd4" }, "BartForSequenceClassification": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "BartForSequenceClassification", "TFBartForSequenceClassification" ], "sha": "ea452fd9a928cfebd71723afa50feb20326917bc" }, "BartModel": { "tokenizer_classes": [ "BartTokenizer", "BartTokenizerFast" ], "processor_classes": [], "model_classes": [ "BartModel", "TFBartModel" ], "sha": "e5df6d1aa75f03833b2df328b9c35463f73a421b" }, "BeitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForImageClassification" ], "sha": "e997587bb890f82faad4bd25eb23d85ba21ecaaa" }, "BeitForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitForSemanticSegmentation" ], "sha": "d4afa9e21e3fe5b087578ed68974d9b3ffc1fb22" }, "BeitModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "BeitModel" ], "sha": "5c4a051f0cca6f64d02c6168deb88413cae10d2c" }, "BertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMaskedLM", "TFBertForMaskedLM" ], "sha": "3e32baa52ce044c75edfb5c28abd51ee8d051282" }, "BertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForMultipleChoice", "TFBertForMultipleChoice" ], "sha": "0b8c3a6d411d1e19e5fd98d4d8631ae7616eeeaa" }, "BertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForNextSentencePrediction", "TFBertForNextSentencePrediction" ], "sha": "628e70debf8864bd0b63aff7901d17d9c4f7612c" }, "BertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForPreTraining", "TFBertForPreTraining" ], "sha": "c748ad37e6a200a6f64b2764191bfe13f976032f" }, "BertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForQuestionAnswering", "TFBertForQuestionAnswering" ], "sha": "4671ad0c21493b97c5eb2f0201192704c29876d5" }, "BertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForSequenceClassification", "TFBertForSequenceClassification" ], "sha": "37a9d44022264c12bdf3ec257778f953b63d4aaf" }, "BertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertForTokenClassification", "TFBertForTokenClassification" ], "sha": "d7dc3a0793ff6dfcb794b21130ee0f185d2c61a2" }, "BertLMHeadModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertLMHeadModel", "TFBertLMHeadModel" ], "sha": "b4e3acc1990f3e365ffddbd54b620a26d9fb4b09" }, "BertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BertModel", "TFBertModel" ], "sha": "3956d303d3cddf0708ff20660c1ea5f6ec30e434" }, "BigBirdForCausalLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForCausalLM" ], "sha": "5c7a487af5248d9c01b45d5481b7d7bb9b36e1b5" }, "BigBirdForMaskedLM": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMaskedLM" ], "sha": "476ef8225c0f69270b577706ad4f1dda13e4dde5" }, "BigBirdForMultipleChoice": 
{ "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForMultipleChoice" ], "sha": "cf93eaa1019987112c171a407745bc183a20513a" }, "BigBirdForPreTraining": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForPreTraining" ], "sha": "5fb9efa13334431e7c186a9fa314b89c4a1eee72" }, "BigBirdForQuestionAnswering": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForQuestionAnswering" ], "sha": "f82f88bd71fba819a8ffb0692915d3529e705417" }, "BigBirdForSequenceClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForSequenceClassification" ], "sha": "ea398090858f9af93b54fc9a8d65cfed78ac27ff" }, "BigBirdForTokenClassification": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdForTokenClassification" ], "sha": "2cdea118999fa58ba9fb0162d99e2ffa146c3df1" }, "BigBirdModel": { "tokenizer_classes": [ "BigBirdTokenizer", "BigBirdTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdModel" ], "sha": "9c55989f31df156194e6997606fb14d9897e0300" }, "BigBirdPegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForCausalLM" ], "sha": "49bc8816c666dee32e27cd8e00136b604eb85243" }, "BigBirdPegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForConditionalGeneration" ], "sha": "e791aa6d1af5a76ca0926d95b1f28bd2d8adf376" }, "BigBirdPegasusForQuestionAnswering": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForQuestionAnswering" ], "sha": "7650e076713ca707a37062adc8c9c1cd60dad7c7" }, "BigBirdPegasusForSequenceClassification": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusForSequenceClassification" ], "sha": "02500e8ebd9c53528750013fb963fbdc2be34034" }, "BigBirdPegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "BigBirdPegasusModel" ], "sha": "b07c5304dfba673cf8b9cf5cd1aa45fbfea1c2f3" }, "BioGptForCausalLM": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForCausalLM" ], "sha": "07073b31da84054fd12226e3cae4cb3beb2547f9" }, "BioGptForSequenceClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForSequenceClassification" ], "sha": "8e18ad6218abd795e050dec324a8c827ccedacb4" }, "BioGptForTokenClassification": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptForTokenClassification" ], "sha": "67f8173c1a17273064d452a9031a51b67f327b6a" }, "BioGptModel": { "tokenizer_classes": [ "BioGptTokenizer" ], "processor_classes": [], "model_classes": [ "BioGptModel" ], "sha": "fe18551d0743538a990520b75707294ec57b4ebe" }, "BitBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitBackbone" ], "sha": "2f06f6b4395b6dce2b00ac839ff757410e743cd7" }, "BitForImageClassification": { "tokenizer_classes": [], 
"processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitForImageClassification" ], "sha": "d0d8476f2d285ddda7c42c0d4a8e4bf6f5d2bfdf" }, "BitModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "BitModel" ], "sha": "30a8a9b1a6b253cc500c01cf41bc1fc9581ea5e5" }, "BlenderbotForCausalLM": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForCausalLM" ], "sha": "8aad2e13e8920bca3cf988ba45f8a7b008b51a81" }, "BlenderbotForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotForConditionalGeneration", "TFBlenderbotForConditionalGeneration" ], "sha": "e8532878b9924fa02fb4b059b7f6e7fa372fff91" }, "BlenderbotModel": { "tokenizer_classes": [ "BlenderbotTokenizer", "BlenderbotTokenizerFast" ], "processor_classes": [], "model_classes": [ "BlenderbotModel", "TFBlenderbotModel" ], "sha": "ff848a40c30ca98eb7c6870bbb02677d5af9db55" }, "BlenderbotSmallForCausalLM": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForCausalLM" ], "sha": "4c57c106630932eb9de4d76210a540d04616304d" }, "BlenderbotSmallForConditionalGeneration": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallForConditionalGeneration" ], "sha": "b8db01fcf3e37a5b369cd50e169bf383b8e905d8" }, "BlenderbotSmallModel": { "tokenizer_classes": [ "BlenderbotSmallTokenizer" ], "processor_classes": [], "model_classes": [ "BlenderbotSmallModel", "TFBlenderbotSmallModel" ], "sha": "0a10c70e225ec63278faffa8fabf759f063f0e55" }, "Blip2ForConditionalGeneration": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2ForConditionalGeneration" ], "sha": "35e1ef43da3554af62eb29a7b3dbbef3f3bef48e" }, "Blip2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "Blip2Model" ], "sha": "c23378f225be31872fff33c103cf0ebc2454ffcc" }, "BlipForConditionalGeneration": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipForConditionalGeneration", "TFBlipForConditionalGeneration" ], "sha": "eaf32bc0369349deef0c777442fc185119171d1f" }, "BlipModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "BlipImageProcessor" ], "model_classes": [ "BlipModel", "TFBlipModel" ], "sha": "3d1d1c15eff22d6b2664a2d15757fa6f5d93827d" }, "BloomForCausalLM": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForCausalLM" ], "sha": "0f4f06f162cd67d34d03ee156484e4001d468500" }, "BloomForQuestionAnswering": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForQuestionAnswering" ], "sha": "23f369f163eef8c9c9685900440b0cbb0f3439fd" }, "BloomForSequenceClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForSequenceClassification" ], "sha": "b2280eef7172835f39b265eb0c46623257f67bbe" }, "BloomForTokenClassification": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomForTokenClassification" ], "sha": 
"9796aa45f99adff987c978089e11c0bd9d7b997f" }, "BloomModel": { "tokenizer_classes": [ "BloomTokenizerFast" ], "processor_classes": [], "model_classes": [ "BloomModel" ], "sha": "28b600fcfdc4f4938406fb518abf895620048cb2" }, "BrosForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosForTokenClassification" ], "sha": "4ec2c91936f96b93667e8946fc7abbdeeb08a6d7" }, "BrosModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "BrosModel" ], "sha": "e2464830b1874eeaf9f4b425fbe0ce8e7c7643e9" }, "CLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "CLIPModel", "TFCLIPModel" ], "sha": "0452d344074485d0e7eb5d5c12447b7c9dbc9619" }, "CLIPSegModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "CLIPSegModel" ], "sha": "7b1305214ccc85d29b776ffbee06748693852a04" }, "CTRLForSequenceClassification": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLForSequenceClassification", "TFCTRLForSequenceClassification" ], "sha": "280b5a3502d607c55c9f8d9f198fe9c2802d6f73" }, "CTRLLMHeadModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLLMHeadModel", "TFCTRLLMHeadModel" ], "sha": "662381663b216f1dd3c9cd30e2e83cb4c6fc9552" }, "CTRLModel": { "tokenizer_classes": [ "CTRLTokenizer" ], "processor_classes": [], "model_classes": [ "CTRLModel", "TFCTRLModel" ], "sha": "68b19b4f132d5a191a73acd78d983cbdcf068e9c" }, "CanineForMultipleChoice": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForMultipleChoice" ], "sha": "fa0451453ed202f903ff7dcf6071aab6630fb89f" }, "CanineForQuestionAnswering": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForQuestionAnswering" ], "sha": "5e1012bb086ac2e0b1497eeb7ed14eb2183d4ecb" }, "CanineForSequenceClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForSequenceClassification" ], "sha": "75336dc9179153869c38a8047ce4b1e02677a260" }, "CanineForTokenClassification": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineForTokenClassification" ], "sha": "65a622ea8e12597e12f45e59d46d8dbe8461fc10" }, "CanineModel": { "tokenizer_classes": [ "CanineTokenizer" ], "processor_classes": [], "model_classes": [ "CanineModel" ], "sha": "531ef67ad4f0b3dc7a9e5d722c774096b7401b1b" }, "ChineseCLIPModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ChineseCLIPImageProcessor" ], "model_classes": [ "ChineseCLIPModel" ], "sha": "504271a3c5fd9c2e877f5b4c01848bc18778c7c3" }, "ClapModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [ "ClapFeatureExtractor" ], "model_classes": [ "ClapModel" ], "sha": "a7874595b900f9b2ddc79130dafc3ff48f4fbfb9" }, "ClvpModelForConditionalGeneration": { "tokenizer_classes": [ "ClvpTokenizer" ], "processor_classes": [ "ClvpFeatureExtractor" ], "model_classes": [], "sha": "45df7581535be337ff781707b6c20994ca221f05" }, "CodeGenForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenForCausalLM" ], "sha": 
"a3fc69d757fd1f0aa01bcbc4337f586651c7cb10" }, "CodeGenModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "CodeGenModel" ], "sha": "dad4941a2b7429fc6e8206fcc4a04fc40f4a0beb" }, "ConditionalDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrForObjectDetection" ], "sha": "762c213a0285edc84eb813a2ed90063cf971ca43" }, "ConditionalDetrModel": { "tokenizer_classes": [], "processor_classes": [ "ConditionalDetrImageProcessor" ], "model_classes": [ "ConditionalDetrModel" ], "sha": "18b75874158cac520c63605293b06e0b1327c263" }, "ConvBertForMaskedLM": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMaskedLM", "TFConvBertForMaskedLM" ], "sha": "307c70e32c3d3c18aeb45e0cbdc9fcd2957d9aba" }, "ConvBertForMultipleChoice": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForMultipleChoice", "TFConvBertForMultipleChoice" ], "sha": "d6561a21ffdb82d03c1822af0510eb7482ce5026" }, "ConvBertForQuestionAnswering": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForQuestionAnswering", "TFConvBertForQuestionAnswering" ], "sha": "8a056da5cc421415c2a24b9f644dd95ca279411d" }, "ConvBertForSequenceClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForSequenceClassification", "TFConvBertForSequenceClassification" ], "sha": "8bb8b20e51d282d777cc567cacadd97a35f0811e" }, "ConvBertForTokenClassification": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertForTokenClassification", "TFConvBertForTokenClassification" ], "sha": "8db0dd3c2b8ccc958fa9a84801f4f837b42fcf2c" }, "ConvBertModel": { "tokenizer_classes": [ "ConvBertTokenizer", "ConvBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ConvBertModel", "TFConvBertModel" ], "sha": "c9c5b1a74f0e468d8467473cabeaa67fcdbaddb7" }, "ConvNextBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextBackbone" ], "sha": "499c7d6a97825b79e19663b70f3b60c4813b6bf2" }, "ConvNextForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextForImageClassification", "TFConvNextForImageClassification" ], "sha": "0b490fd6b19cdbf721025dbd6ee45dcc5828e6e3" }, "ConvNextModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextModel", "TFConvNextModel" ], "sha": "7b3b47a57b9a9120e022b91d6067daeac55b794f" }, "ConvNextV2Backbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Backbone" ], "sha": "c82fc526949dfd892a1fee3c34be6f8d80c4d3df" }, "ConvNextV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2ForImageClassification", "TFConvNextV2ForImageClassification" ], "sha": "ee22bae1cbb87d66fc7f62f7e15a43d6ff80d3cc" }, "ConvNextV2Model": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ConvNextV2Model", "TFConvNextV2Model" ], "sha": 
"c4dd68ee1102cba05bcc483da2a88e39427b7249" }, "CvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtForImageClassification", "TFCvtForImageClassification" ], "sha": "4b1938e252fdb26a06c1f5755e07fa8f6eed2d75" }, "CvtModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "CvtModel", "TFCvtModel" ], "sha": "27fed12c174f4f4f1fe27075d1c29602fe0669f0" }, "DPRQuestionEncoder": { "tokenizer_classes": [ "DPRQuestionEncoderTokenizer", "DPRQuestionEncoderTokenizerFast" ], "processor_classes": [], "model_classes": [ "DPRQuestionEncoder", "TFDPRQuestionEncoder" ], "sha": "09ae0269780271e0a4916f7bab1dbc4f8a76070d" }, "DPTForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForDepthEstimation" ], "sha": "11b7735d64d95b6599811631b012d2dec6eaa2c1" }, "DPTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTForSemanticSegmentation" ], "sha": "e140c3c716a4bf11dad875e5f5f0abd2bd4cbbcb" }, "DPTModel": { "tokenizer_classes": [], "processor_classes": [ "DPTImageProcessor" ], "model_classes": [ "DPTModel" ], "sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5" }, "Data2VecAudioForAudioFrameClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForAudioFrameClassification" ], "sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f" }, "Data2VecAudioForCTC": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForCTC" ], "sha": "bb161b6a181bd2c22cf30222f46fa6ef42225744" }, "Data2VecAudioForSequenceClassification": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForSequenceClassification" ], "sha": "8de17e0a959eca5f72b2ea59a11bc1fa744785d9" }, "Data2VecAudioForXVector": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioForXVector" ], "sha": "dcb92484cf28fb4fe1dcf5d6e8d78e04382fdce9" }, "Data2VecAudioModel": { "tokenizer_classes": [], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Data2VecAudioModel" ], "sha": "73f503fdff73b7616154f64dbe38a685cc48e8eb" }, "Data2VecTextForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForCausalLM" ], "sha": "1f3658ce623653338cd31516551e8181aa08bb38" }, "Data2VecTextForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMaskedLM" ], "sha": "fb41ac30d0faa0899bf5afaa0986df8993395ca6" }, "Data2VecTextForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForMultipleChoice" ], "sha": "e7556d520ad90ebae5ad88554d45a37488d00040" }, "Data2VecTextForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForQuestionAnswering" ], "sha": "9630833d76a1fd7e96b904d87bb11b7c00ccd021" }, "Data2VecTextForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForSequenceClassification" ], "sha": 
"156e4019c37d9592f193ba80553cd245cbccecb3" }, "Data2VecTextForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextForTokenClassification" ], "sha": "55b3a49fdbf22479d6eb939261d4b884ea288270" }, "Data2VecTextModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "Data2VecTextModel" ], "sha": "c21be3e4f88e8357bf33bfba8f8e05ae2e735124" }, "Data2VecVisionForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForImageClassification", "TFData2VecVisionForImageClassification" ], "sha": "d640e7ced7a3fbbb8c8661a4f67b934e55406172" }, "Data2VecVisionForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionForSemanticSegmentation", "TFData2VecVisionForSemanticSegmentation" ], "sha": "3eba3cd694fab6530b7e5da8f49d3951301c816a" }, "Data2VecVisionModel": { "tokenizer_classes": [], "processor_classes": [ "BeitImageProcessor" ], "model_classes": [ "Data2VecVisionModel", "TFData2VecVisionModel" ], "sha": "2a7ad25e4359970dc70494a2f3eb98e2a3c9806d" }, "DebertaForMaskedLM": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForMaskedLM", "TFDebertaForMaskedLM" ], "sha": "e0f9ada9e0f6d4d7cc39d7cbd58369b0c84de33d" }, "DebertaForQuestionAnswering": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForQuestionAnswering", "TFDebertaForQuestionAnswering" ], "sha": "a3eb69cdb0b52f7d0fb730e882f1a54b9a7442ea" }, "DebertaForSequenceClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForSequenceClassification", "TFDebertaForSequenceClassification" ], "sha": "32af91d12c4e9b6d62b420bee93311fd77d3c933" }, "DebertaForTokenClassification": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaForTokenClassification", "TFDebertaForTokenClassification" ], "sha": "ba62ba2726d813e60e512476fc1b178aa3858175" }, "DebertaModel": { "tokenizer_classes": [ "DebertaTokenizer", "DebertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaModel", "TFDebertaModel" ], "sha": "4273294e14cd04c0e2cd1dcff5cf7e5d4fe906ba" }, "DebertaV2ForMaskedLM": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMaskedLM", "TFDebertaV2ForMaskedLM" ], "sha": "a053dedc2cdf32918a84277cb0c05186604496a5" }, "DebertaV2ForMultipleChoice": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForMultipleChoice", "TFDebertaV2ForMultipleChoice" ], "sha": "07e39f520ce239b39ef8cb24cd7874d06c791063" }, "DebertaV2ForQuestionAnswering": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForQuestionAnswering", "TFDebertaV2ForQuestionAnswering" ], "sha": "9cecb3a7fc6b95099122283644ea1f8ced287d1b" }, "DebertaV2ForSequenceClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForSequenceClassification", 
"TFDebertaV2ForSequenceClassification" ], "sha": "df9ea1f5c0f2ccd139b21cfb3963a5a5ebfb5b81" }, "DebertaV2ForTokenClassification": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2ForTokenClassification", "TFDebertaV2ForTokenClassification" ], "sha": "51fe01989df38a540ac1abca5ee71a51365defd5" }, "DebertaV2Model": { "tokenizer_classes": [ "DebertaV2Tokenizer", "DebertaV2TokenizerFast" ], "processor_classes": [], "model_classes": [ "DebertaV2Model", "TFDebertaV2Model" ], "sha": "211df4bd1a4a9b66c97af3f9231a5d2af8de7b9f" }, "DeformableDetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrForObjectDetection" ], "sha": "8fa0db215c458f60ae4d455d6fb067c1c5e39fdc" }, "DeformableDetrModel": { "tokenizer_classes": [], "processor_classes": [ "DeformableDetrImageProcessor" ], "model_classes": [ "DeformableDetrModel" ], "sha": "0faac5624696b03edd14694642f9804f2cd8f3da" }, "DeiTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassification", "TFDeiTForImageClassification" ], "sha": "21fc864199dafa0130f16a45769c6b6ca22c7784" }, "DeiTForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForImageClassificationWithTeacher", "TFDeiTForImageClassificationWithTeacher" ], "sha": "5a5738a109e27f3d4b78a0db4cb1d3331140c10e" }, "DeiTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTForMaskedImageModeling", "TFDeiTForMaskedImageModeling" ], "sha": "d5df5c538fe1efb8d668a3893d1691d505a0de06" }, "DeiTModel": { "tokenizer_classes": [], "processor_classes": [ "DeiTImageProcessor" ], "model_classes": [ "DeiTModel", "TFDeiTModel" ], "sha": "0fdbff6f44b7c6933c2027fec1d7f87bec06b590" }, "DetaForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaForObjectDetection" ], "sha": "a15ad6ce64fbcb5021b2b99e9587c4011ef3341d" }, "DetaModel": { "tokenizer_classes": [], "processor_classes": [ "DetaImageProcessor" ], "model_classes": [ "DetaModel" ], "sha": "8820f2297ec0dec8f1875054559c8b7a162098e3" }, "DetrForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForObjectDetection" ], "sha": "7dc967c53f4b3f07904c42b255346b744d0ad84e" }, "DetrForSegmentation": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrForSegmentation" ], "sha": "e34330acdae359588ef853e961a78d419dc4e8eb" }, "DetrModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "DetrModel" ], "sha": "f15ce38a10c7447e8048b1681e4811322a005722" }, "DinatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatBackbone" ], "sha": "3ba13790a0796d90104c207f75bb3d5d79723d51" }, "DinatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatForImageClassification" ], "sha": "624cf2d864a7ea2f90e24014a213e34597e8bd76" }, "DinatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "DinatModel" ], "sha": "d6c75bc51196f0a683afb12de6310fdda13efefd" }, "Dinov2Backbone": { "tokenizer_classes": [], "processor_classes": 
[ "BitImageProcessor" ], "model_classes": [ "Dinov2Backbone" ], "sha": "dbf8d2ff3092ac53c11e6525e6cbae7ace84769a" }, "Dinov2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2ForImageClassification" ], "sha": "ae44840966456aae33641df2c8c8a4af5b457b24" }, "Dinov2Model": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "Dinov2Model" ], "sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959" }, "DistilBertForMaskedLM": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMaskedLM", "TFDistilBertForMaskedLM" ], "sha": "b2dfda30b012821996e6e603729562d9c900bc0f" }, "DistilBertForMultipleChoice": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForMultipleChoice", "TFDistilBertForMultipleChoice" ], "sha": "ec6b83129a7d1be2a6b8d58303abcca5541a5cb3" }, "DistilBertForQuestionAnswering": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForQuestionAnswering", "TFDistilBertForQuestionAnswering" ], "sha": "812406b226415044469b0e0a84c4fe0ff338c5d3" }, "DistilBertForSequenceClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForSequenceClassification", "TFDistilBertForSequenceClassification" ], "sha": "6f427ce7b3e5aaa596938fbd98437d3875581b7b" }, "DistilBertForTokenClassification": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertForTokenClassification", "TFDistilBertForTokenClassification" ], "sha": "166dbe3f5d6ecd871762567069454d6ec65234b4" }, "DistilBertModel": { "tokenizer_classes": [ "DistilBertTokenizer", "DistilBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "DistilBertModel", "TFDistilBertModel" ], "sha": "cc4425ad0676f3ec00e8bffe485fe83cae61041a" }, "DonutSwinModel": { "tokenizer_classes": [], "processor_classes": [ "DonutImageProcessor" ], "model_classes": [ "DonutSwinModel" ], "sha": "1b10654fbfe2f2ea410a672ab605bd5c60d3f284" }, "EfficientFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassification", "TFEfficientFormerForImageClassification" ], "sha": "ebadb628e12f268e321fcc756fa4606f7b5b3178" }, "EfficientFormerForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerForImageClassificationWithTeacher" ], "sha": "1beabce6da9cb4ebbeafcd1ef23fac36b4a269e2" }, "EfficientFormerModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientFormerImageProcessor" ], "model_classes": [ "EfficientFormerModel", "TFEfficientFormerModel" ], "sha": "200fae5b875844d09c8a91d1c155b72b06a517f6" }, "EfficientNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetForImageClassification" ], "sha": "6ed195ee636d2c0b885139da8c7b45d57ebaeee0" }, "EfficientNetModel": { "tokenizer_classes": [], "processor_classes": [ "EfficientNetImageProcessor" ], "model_classes": [ "EfficientNetModel" ], "sha": 
"eb03c90d4aaad98af0f19e0dfbdc41106297ffff" }, "ElectraForCausalLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForCausalLM" ], "sha": "c78396bc8cdd8db247892339de8da80d691d1d04" }, "ElectraForMaskedLM": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMaskedLM", "TFElectraForMaskedLM" ], "sha": "631337703dbd8d41904c39891a41c6f1edd31813" }, "ElectraForMultipleChoice": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForMultipleChoice", "TFElectraForMultipleChoice" ], "sha": "66fdea6e22cfcbd3caa49ea82f31871c460612fa" }, "ElectraForPreTraining": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForPreTraining", "TFElectraForPreTraining" ], "sha": "7b2d0fa8726b1180c7d6cde4f4afc3800eba7e6f" }, "ElectraForQuestionAnswering": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForQuestionAnswering", "TFElectraForQuestionAnswering" ], "sha": "c6b127fd9f3019462e4ca2373762836207e39ce2" }, "ElectraForSequenceClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForSequenceClassification", "TFElectraForSequenceClassification" ], "sha": "41f0089ab7876abe0e28dbbd565144acb31f8127" }, "ElectraForTokenClassification": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraForTokenClassification", "TFElectraForTokenClassification" ], "sha": "1fdbbe70c1ddd16503820a1443d6a379a15ed777" }, "ElectraModel": { "tokenizer_classes": [ "ElectraTokenizer", "ElectraTokenizerFast" ], "processor_classes": [], "model_classes": [ "ElectraModel", "TFElectraModel" ], "sha": "312b532cbef26610d80f2bd008650160cae4f7a1" }, "EncodecModel": { "tokenizer_classes": [], "processor_classes": [ "EncodecFeatureExtractor" ], "model_classes": [ "EncodecModel" ], "sha": "e14c5a2fd6529c85cd4ac5a05ee9e550ced6a006" }, "EncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "EncoderDecoderModel", "TFEncoderDecoderModel" ], "sha": "1038be9fd1b87b2e0a8f33721ff8e4612d34b3b6" }, "ErnieForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForCausalLM" ], "sha": "b49e00112ff06c2f0a0e54499921dddcf8c3c6a8" }, "ErnieForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMaskedLM" ], "sha": "30429830d1997222d885dcfdbd36d5e02d0d34b1" }, "ErnieForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForMultipleChoice" ], "sha": "5a21144bf35dfb60560ff8249116ad4459c0069a" }, "ErnieForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForNextSentencePrediction" ], "sha": "ed5868efb39bf6afb29f0cf444deafcf1e50b5bc" }, "ErnieForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForPreTraining" ], "sha": "e4ad30d291c310fea25e6f91f91393f993513b42" }, "ErnieForQuestionAnswering": { 
"tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForQuestionAnswering" ], "sha": "fe7c74b763f63a9fd864dad325385075df7c80c8" }, "ErnieForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForSequenceClassification" ], "sha": "84e0be05fcd52f54e96a69f67a2481323a58a9db" }, "ErnieForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieForTokenClassification" ], "sha": "91cf62c43a5a83332552ffa2d8e5e44d63a224ea" }, "ErnieMForMultipleChoice": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForMultipleChoice" ], "sha": "c42ee7fcb132a323ace314c32e63c8a7d36ce18f" }, "ErnieMForQuestionAnswering": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForQuestionAnswering" ], "sha": "2b90dee75ca87b214f96db00002aa18244ec8e84" }, "ErnieMForSequenceClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForSequenceClassification" ], "sha": "d8368646d8b1c67b1460af9c6ec13fd9d894cae6" }, "ErnieMForTokenClassification": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMForTokenClassification" ], "sha": "a9e29ba60fa0b7bedc2ed26a6b9911427df1ca6b" }, "ErnieMModel": { "tokenizer_classes": [ "ErnieMTokenizer" ], "processor_classes": [], "model_classes": [ "ErnieMModel" ], "sha": "7306eac3f38c3cf6211f0e741fdb81c6cc92bc09" }, "ErnieModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "ErnieModel" ], "sha": "b51478a9f40e353c41be3a29ccef103dcfe22b4b" }, "EsmForMaskedLM": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForMaskedLM", "TFEsmForMaskedLM" ], "sha": "b56297b6cd64b9ba7c613d0cd146f1ecbea8115e" }, "EsmForSequenceClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForSequenceClassification", "TFEsmForSequenceClassification" ], "sha": "cc6d7ef0a4763540d67b7a4fb31bede9a7d3f245" }, "EsmForTokenClassification": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmForTokenClassification", "TFEsmForTokenClassification" ], "sha": "498953f66e260b974c504abbc863ee266d6c84a9" }, "EsmModel": { "tokenizer_classes": [ "EsmTokenizer" ], "processor_classes": [], "model_classes": [ "EsmModel", "TFEsmModel" ], "sha": "183838263b70809310117a0761542501acf64c21" }, "FNetForMaskedLM": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMaskedLM" ], "sha": "91eaae1eac894af5d96c0221ec9bcef7f1af41c8" }, "FNetForMultipleChoice": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForMultipleChoice" ], "sha": "c15d98d5f7a6f3ef3099b1257949bee208d5466e" }, "FNetForNextSentencePrediction": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForNextSentencePrediction" ], "sha": "c59440b44d07d61fc45a90ded7fc11d6f25b143d" }, "FNetForPreTraining": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForPreTraining" ], "sha": "c05f55ccfb2f2533babd3c6e99de7749bc8081da" }, 
"FNetForQuestionAnswering": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForQuestionAnswering" ], "sha": "47788e49dd435653fa2aa4b3ccae3572a870758e" }, "FNetForSequenceClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForSequenceClassification" ], "sha": "a3049b896ea6c5a32c364989c3afe604ee58b9fc" }, "FNetForTokenClassification": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetForTokenClassification" ], "sha": "3bcdafca57d544bb81e2f7eead1e512c168582fc" }, "FNetModel": { "tokenizer_classes": [ "FNetTokenizer", "FNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "FNetModel" ], "sha": "48fa66de37df126504db3b658806135eb877f505" }, "FSMTForConditionalGeneration": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTForConditionalGeneration" ], "sha": "6a1a981b29c8a98c1fd31bd0ad809f5575ca6c7a" }, "FSMTModel": { "tokenizer_classes": [ "FSMTTokenizer" ], "processor_classes": [], "model_classes": [ "FSMTModel" ], "sha": "683f6f73a2ab87801f1695a72d1af63cf173ab7c" }, "FalconForCausalLM": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForCausalLM" ], "sha": "60076d5dafc5e33ba9c90dcd05e7c0834e44049a" }, "FalconForQuestionAnswering": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForQuestionAnswering" ], "sha": "b1ee9cd5fad2d177ea5a46df4611cd02f66ae788" }, "FalconForSequenceClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForSequenceClassification" ], "sha": "007838c0991c2b6a87dc49a8a5c20f29149a00fa" }, "FalconForTokenClassification": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconForTokenClassification" ], "sha": "0ea6ae548773daa6e3317fddc058957e956eebf4" }, "FalconModel": { "tokenizer_classes": [ "PreTrainedTokenizerFast" ], "processor_classes": [], "model_classes": [ "FalconModel" ], "sha": "ca15a579c946eb00c5b39cc8e0ea63d0c1460f84" }, "FlaubertForMultipleChoice": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForMultipleChoice", "TFFlaubertForMultipleChoice" ], "sha": "8b12bd87a63f2e86c3482431742f6d8abf6ec4fd" }, "FlaubertForQuestionAnsweringSimple": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForQuestionAnsweringSimple", "TFFlaubertForQuestionAnsweringSimple" ], "sha": "5c0e7ad1efae7e3497f5cd6d2d9519403df49d37" }, "FlaubertForSequenceClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForSequenceClassification", "TFFlaubertForSequenceClassification" ], "sha": "762f12a8c99690be8ed2663b7af3011660174a7c" }, "FlaubertForTokenClassification": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertForTokenClassification", "TFFlaubertForTokenClassification" ], "sha": "d2ab741c937bb69ef27c89e4c86a8c9d444874ca" }, "FlaubertModel": { "tokenizer_classes": [ "FlaubertTokenizer" ], "processor_classes": [], "model_classes": [ "FlaubertModel", "TFFlaubertModel" ], "sha": "bdc2f8e17bb869393053429ec8c1c842bfeabb07" }, "FlaubertWithLMHeadModel": { "tokenizer_classes": [ "FlaubertTokenizer" 
], "processor_classes": [], "model_classes": [ "FlaubertWithLMHeadModel", "TFFlaubertWithLMHeadModel" ], "sha": "f20eb0932c90061003c9cc4e109c6ea22559c4f2" }, "FlavaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaForPreTraining" ], "sha": "6e9b2094060a5fa27984c7b49e5d0e820a88b487" }, "FlavaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "FlavaImageProcessor" ], "model_classes": [ "FlavaModel" ], "sha": "31ebf1b7a0ef1fd5059b98e28e5ab1c366d2c482" }, "FocalNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetBackbone" ], "sha": "eb8c580969443cb87de7dd9a256deaface03692f" }, "FocalNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForImageClassification" ], "sha": "28d30ded26a3213e8fb7011a455afc3aa98b0a95" }, "FocalNetForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetForMaskedImageModeling" ], "sha": "0ea7626d19c9dd2f3113d977f643a1babc720bd3" }, "FocalNetModel": { "tokenizer_classes": [], "processor_classes": [ "BitImageProcessor" ], "model_classes": [ "FocalNetModel" ], "sha": "107b004e6aa14108a359b7d22bdb9aa141ec05d5" }, "FunnelBaseModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelBaseModel", "TFFunnelBaseModel" ], "sha": "87fed4252812df23315a56531625333e315681c6" }, "FunnelForMaskedLM": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMaskedLM", "TFFunnelForMaskedLM" ], "sha": "5543daf29f185cd45f2599bd6f38c96064c9c8de" }, "FunnelForMultipleChoice": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForMultipleChoice", "TFFunnelForMultipleChoice" ], "sha": "a8bf597e37dbefb1ac5c97c4cb162c3d522a33a1" }, "FunnelForPreTraining": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForPreTraining", "TFFunnelForPreTraining" ], "sha": "cbcb300d60aacd5950a45409b6e3f0f240c9082e" }, "FunnelForQuestionAnswering": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForQuestionAnswering", "TFFunnelForQuestionAnswering" ], "sha": "6a5675305e096434e818486a13892cb55daffd13" }, "FunnelForSequenceClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForSequenceClassification", "TFFunnelForSequenceClassification" ], "sha": "1bc557a1e4314da21a44dee57b799e95a7025e5c" }, "FunnelForTokenClassification": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelForTokenClassification", "TFFunnelForTokenClassification" ], "sha": "693bc1217a224efd558f410ddc8ffc63739bebc3" }, "FunnelModel": { "tokenizer_classes": [ "FunnelTokenizer", "FunnelTokenizerFast" ], "processor_classes": [], "model_classes": [ "FunnelModel", "TFFunnelModel" ], "sha": "bfbaa8fa21c3abf80b94e7168b5ecff8ec5b5f76" }, "FuyuForCausalLM": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "FuyuImageProcessor" ], "model_classes": [ "FuyuForCausalLM" ], "sha": 
"685d78258ea95c5c82e0e4555d0d4a2270ab8bff" }, "GLPNForDepthEstimation": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNForDepthEstimation" ], "sha": "32ca1c1ef5d33242e5e7c0433bcd773c082f0260" }, "GLPNModel": { "tokenizer_classes": [], "processor_classes": [ "GLPNImageProcessor" ], "model_classes": [ "GLPNModel" ], "sha": "24a8dbb48b1aa0ba2eba44324fcd0c78cca64dd4" }, "GPT2ForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForQuestionAnswering" ], "sha": "a5bdd6bd4d79feece85ea9a8bd4ee5fe54c1d45b" }, "GPT2ForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForSequenceClassification", "TFGPT2ForSequenceClassification" ], "sha": "90a2d78e5c7f288152f8456c3d58a43b40a58449" }, "GPT2ForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2ForTokenClassification" ], "sha": "da78bc95b45fab2da9d43f2ca27164996e31ade1" }, "GPT2LMHeadModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2LMHeadModel", "TFGPT2LMHeadModel" ], "sha": "78f56535d4ce19e9d7c0992e390085c5a4196b37" }, "GPT2Model": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPT2Model", "TFGPT2Model" ], "sha": "d6694b0d8fe17978761c9305dc151780506b192e" }, "GPTBigCodeForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForCausalLM" ], "sha": "99f7aaadf9c29669c63ef6c16f6bc5c07dbb9126" }, "GPTBigCodeForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForSequenceClassification" ], "sha": "64a7398d5763161037b818314c60dd83d93d03e9" }, "GPTBigCodeForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeForTokenClassification" ], "sha": "310537ecd22d45f71bf594b17922cf2abc338eaf" }, "GPTBigCodeModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTBigCodeModel" ], "sha": "3069419084a9dc36802d47de9df3d314ccfc2f28" }, "GPTJForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForCausalLM", "TFGPTJForCausalLM" ], "sha": "1fff390baa45cb187903ebdd269c975bb9ed7386" }, "GPTJForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForQuestionAnswering", "TFGPTJForQuestionAnswering" ], "sha": "3d4ec61dbed01f844d4c309971eeb5ad722c6c84" }, "GPTJForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJForSequenceClassification", "TFGPTJForSequenceClassification" ], "sha": "4b5db259cd16ca84ae2cd79aa4851cdd14479128" }, "GPTJModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTJModel", "TFGPTJModel" ], "sha": "d8e1db30d08fbf57da6fc139aea3ffd63ab6226e" }, "GPTNeoForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForCausalLM" ], "sha": 
"e88934e402c15195dd99b2947632415dd7645268" }, "GPTNeoForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForQuestionAnswering" ], "sha": "623883e94bd08caf9b3f839b98debeea72d5bc2b" }, "GPTNeoForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForSequenceClassification" ], "sha": "bf2090d5d91a70eb37ba51fbdcf23afc7031fea8" }, "GPTNeoForTokenClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoForTokenClassification" ], "sha": "d5208e73e24a1671219776b50fe5f96e0e4cd218" }, "GPTNeoModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoModel" ], "sha": "72a7cd49da613c3125a90884df4763545c594e56" }, "GPTNeoXForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForCausalLM" ], "sha": "0229cfaaa843c6b492ac2abffabb00f1ff1936f8" }, "GPTNeoXForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForQuestionAnswering" ], "sha": "7d2f08c959c211129952ee03b5562add09fe6864" }, "GPTNeoXForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForSequenceClassification" ], "sha": "17c4b845ee2e0bb780ca2dea2d59a3d9d5d3c651" }, "GPTNeoXForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXForTokenClassification" ], "sha": "3aa4fe8a562f32230041d6d3616aa5ecc3f30192" }, "GPTNeoXJapaneseForCausalLM": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseForCausalLM" ], "sha": "5fca2479f1064fd22e17f944c8fcc14f7e73f1d5" }, "GPTNeoXJapaneseModel": { "tokenizer_classes": [ "GPTNeoXJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTNeoXJapaneseModel" ], "sha": "5c6ed124150df845cfc701d70b97fdcde687be52" }, "GPTNeoXModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "GPTNeoXModel" ], "sha": "33114ba2f72189d5a2bd63f0cdb78551189242ff" }, "GPTSanJapaneseForConditionalGeneration": { "tokenizer_classes": [ "GPTSanJapaneseTokenizer" ], "processor_classes": [], "model_classes": [ "GPTSanJapaneseForConditionalGeneration" ], "sha": "ff6a41faaa713c7fbd5d9a1a50539745f9e1178e" }, "GitForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitForCausalLM" ], "sha": "60f9c50466ae0beeb11776ca5bfeb6473f441554" }, "GitModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GitModel" ], "sha": "3d2eb6bddf95bb4a4e59b045d4e464c730c07f41" }, "GroupViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "GroupViTModel", "TFGroupViTModel" ], "sha": "05a3a02dd46cb9eb078608dec98f633c0cf559ef" }, "HubertForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForCTC" ], "sha": "13431b76106f993eedcff48a75bae590a09b14f7" }, "HubertForSequenceClassification": { "tokenizer_classes": [ 
"Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertForSequenceClassification" ], "sha": "d23f46607a900b1a55dfee4b7ed205a6823035b1" }, "HubertModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "HubertModel", "TFHubertModel" ], "sha": "3224562c86c4669db65ae7defdc5fb555b113e95" }, "IBertForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMaskedLM" ], "sha": "e333a9c9d375f4d839b7e9e21d1a1c8dad58d7d1" }, "IBertForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForMultipleChoice" ], "sha": "a81f7d64cd7ce5fe6cd726b23d9d14ac5d17bf53" }, "IBertForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForQuestionAnswering" ], "sha": "7b66d13d4d6801a82cbeb7f9fd853ca1630d1f8b" }, "IBertForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForSequenceClassification" ], "sha": "309d57145c40f889222fe5df62f14dddf4496b38" }, "IBertForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertForTokenClassification" ], "sha": "b032e9bff4b081b78c098b2d8bc610ac035c6ddf" }, "IBertModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "IBertModel" ], "sha": "6749164c678d4883d455f98b1dfc98c62da8f08b" }, "IdeficsForVisionText2Text": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsForVisionText2Text" ], "sha": "2c2f2e2cd6b02a77d0cdd8c3767ba9a6267dbd20" }, "IdeficsModel": { "tokenizer_classes": [ "LlamaTokenizerFast" ], "processor_classes": [ "IdeficsImageProcessor" ], "model_classes": [ "IdeficsModel" ], "sha": "649df2e35e067efd573ff2d083784a5cf876545e" }, "ImageGPTForCausalImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForCausalImageModeling" ], "sha": "9a7d1fc04439ab1d9d690de9c3e7673f08568cdf" }, "ImageGPTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTForImageClassification" ], "sha": "d92c7aed4ba5de74a1f542b736010090e4a58b42" }, "ImageGPTModel": { "tokenizer_classes": [], "processor_classes": [ "ImageGPTImageProcessor" ], "model_classes": [ "ImageGPTModel" ], "sha": "5a7983e48d5841704733dd0756177680ed50c074" }, "Kosmos2ForConditionalGeneration": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2ForConditionalGeneration" ], "sha": "d1d4607782b911411676f1ee79997dee645def58" }, "Kosmos2Model": { "tokenizer_classes": [ "XLMRobertaTokenizerFast" ], "processor_classes": [ "CLIPImageProcessor" ], "model_classes": [ "Kosmos2Model" ], "sha": "379d8944a65312094d9ab1c4b8a82058a2d3274e" }, "LEDForConditionalGeneration": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForConditionalGeneration", "TFLEDForConditionalGeneration" ], "sha": "a354b49a79351f3ea8ae7776d9f8352ae26cfc14" }, "LEDForQuestionAnswering": { 
"tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForQuestionAnswering" ], "sha": "47c7a75a1e650dae60ff6e9bbab0f2386946670c" }, "LEDForSequenceClassification": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDForSequenceClassification" ], "sha": "3571e2c9d9f2f2ec0b8fe47090330b128be05126" }, "LEDModel": { "tokenizer_classes": [ "LEDTokenizer", "LEDTokenizerFast" ], "processor_classes": [], "model_classes": [ "LEDModel", "TFLEDModel" ], "sha": "3c3f6eb142545afc570187bfdabfe65d43dafbe4" }, "LayoutLMForMaskedLM": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForMaskedLM", "TFLayoutLMForMaskedLM" ], "sha": "0368bd9bd8fd3eb43b8a3b38962b5345b8765514" }, "LayoutLMForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForQuestionAnswering", "TFLayoutLMForQuestionAnswering" ], "sha": "0d6a4bc614fccfa313c1fb6d132a250929518f85" }, "LayoutLMForSequenceClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForSequenceClassification", "TFLayoutLMForSequenceClassification" ], "sha": "1bd68c73dbf6c8c0526d24fbe2831be82998c440" }, "LayoutLMForTokenClassification": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMForTokenClassification", "TFLayoutLMForTokenClassification" ], "sha": "155e7da3f1d786aa39d957b16080c52de4a7efd7" }, "LayoutLMModel": { "tokenizer_classes": [ "LayoutLMTokenizer", "LayoutLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "LayoutLMModel", "TFLayoutLMModel" ], "sha": "14f77b30d267910f11f0fd532a91a6b85ab3a4de" }, "LayoutLMv2ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForQuestionAnswering" ], "sha": "f452e28dd34d3c38cce046b1cc7b0ada69f587b1" }, "LayoutLMv2ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForSequenceClassification" ], "sha": "b483e08fd143113629ecda3dbfd57e69bfeb5f11" }, "LayoutLMv2ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2ForTokenClassification" ], "sha": "0721ae69bff00ecfff1b3d1521a475cde0253299" }, "LayoutLMv2Model": { "tokenizer_classes": [ "LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" ], "processor_classes": [ "LayoutLMv2ImageProcessor" ], "model_classes": [ "LayoutLMv2Model" ], "sha": "6a1b510769b344979a910a7d0bade613a9ec2dfc" }, "LayoutLMv3ForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForQuestionAnswering" ], "sha": "4640242388e69cf77ea2dd3ac36ec6f1b26628c8" }, "LayoutLMv3ForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForSequenceClassification", 
"TFLayoutLMv3ForSequenceClassification" ], "sha": "96515f699874cfbfbec7a64c539ae92419e4c6dc" }, "LayoutLMv3ForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3ForTokenClassification", "TFLayoutLMv3ForTokenClassification" ], "sha": "ed4ffc464f2028fe50dfc6823f4eda78d34be7e6" }, "LayoutLMv3Model": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [ "LayoutLMv3ImageProcessor" ], "model_classes": [ "LayoutLMv3Model", "TFLayoutLMv3Model" ], "sha": "69725e5e2445e5c1c3aa8a2aa49cfd72e0a44565" }, "LevitForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassification" ], "sha": "5ae8ccaa1fe1c947cb8ae6499e4a150c668bb9f0" }, "LevitForImageClassificationWithTeacher": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitForImageClassificationWithTeacher" ], "sha": "568cc0d965b9bd293f240e7724314db6d50f6722" }, "LevitModel": { "tokenizer_classes": [], "processor_classes": [ "LevitImageProcessor" ], "model_classes": [ "LevitModel" ], "sha": "172efa52b50c75c3b3e498fa638f55e65b2ebf87" }, "LiltForQuestionAnswering": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForQuestionAnswering" ], "sha": "0a348441999e98ec003b29fc4d5a67ad22ee6ca2" }, "LiltForSequenceClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForSequenceClassification" ], "sha": "c53ab0ba33536fe564a4a1e4f1674d990c01b83a" }, "LiltForTokenClassification": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltForTokenClassification" ], "sha": "14f85076f9b3f7016917e324d51ebd22511a2ae5" }, "LiltModel": { "tokenizer_classes": [ "LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" ], "processor_classes": [], "model_classes": [ "LiltModel" ], "sha": "3f1166cc14c532388df7e82336a8e575a813bd3f" }, "LongT5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5ForConditionalGeneration" ], "sha": "c685cbbe706ad5c9a28689631765726a1874dcc7" }, "LongT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "LongT5Model" ], "sha": "6b468e55e2490565e6155690201086ac00c72062" }, "LongformerForMaskedLM": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMaskedLM", "TFLongformerForMaskedLM" ], "sha": "929d3bda9a1485d9bae41f9dbfc1d149c1c4e78e" }, "LongformerForMultipleChoice": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForMultipleChoice", "TFLongformerForMultipleChoice" ], "sha": "60b1ecac6b9385ce18c7e6978ab161cce8e7f9d4" }, "LongformerForQuestionAnswering": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForQuestionAnswering", "TFLongformerForQuestionAnswering" ], "sha": "be45ab1321b703f2200cbbcae560aaf2e2afef88" }, "LongformerForSequenceClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "LongformerForSequenceClassification", "TFLongformerForSequenceClassification" ], "sha": "8bc0de0b0f740bf397eb2770ec3ce3a24f3d7af9" }, "LongformerForTokenClassification": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerForTokenClassification", "TFLongformerForTokenClassification" ], "sha": "efa33a9b6f47f0f7979af08ae8d04a5a7363a14b" }, "LongformerModel": { "tokenizer_classes": [ "LongformerTokenizer", "LongformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "LongformerModel", "TFLongformerModel" ], "sha": "b023d531688e8655fc09300ac36742588efb3240" }, "LukeForMaskedLM": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMaskedLM" ], "sha": "954cf6cd2bf1f298a3956b10c36656c57387506d" }, "LukeForMultipleChoice": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForMultipleChoice" ], "sha": "d1310a9174ad50d60b30ad6049e165deb2539034" }, "LukeForQuestionAnswering": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForQuestionAnswering" ], "sha": "3ea38da4e32cb4e45bea82b2e81a8639aeba2c35" }, "LukeForSequenceClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForSequenceClassification" ], "sha": "b5b11248aeb4f5976379d15a977aeb2677e0c0f9" }, "LukeForTokenClassification": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeForTokenClassification" ], "sha": "8aab1a33ad26a344a6f4dfd68630e9661e174471" }, "LukeModel": { "tokenizer_classes": [ "LukeTokenizer" ], "processor_classes": [], "model_classes": [ "LukeModel" ], "sha": "ae23a674e7297d41f33c9af86e039757dfd2d531" }, "LxmertForPreTraining": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForPreTraining", "TFLxmertForPreTraining" ], "sha": "7b0843403c187aef00f20d5087086468d9613d2c" }, "LxmertForQuestionAnswering": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertForQuestionAnswering" ], "sha": "27a74bd2cd156e46656c43ceb432c4deda0df5c1" }, "LxmertModel": { "tokenizer_classes": [ "LxmertTokenizer", "LxmertTokenizerFast" ], "processor_classes": [], "model_classes": [ "LxmertModel", "TFLxmertModel" ], "sha": "97612a0d6b14406ea9bfd7672e6974e0961cbef1" }, "M2M100ForConditionalGeneration": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100ForConditionalGeneration" ], "sha": "32ac347092d51f658b41ffc111b67d49acdeab46" }, "M2M100Model": { "tokenizer_classes": [ "M2M100Tokenizer" ], "processor_classes": [], "model_classes": [ "M2M100Model" ], "sha": "e95c2ae168c7ba19f8114def40e1b1edd953b2f5" }, "MBartForCausalLM": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForCausalLM" ], "sha": "a45044f8056328d20a764356eca3d0746a7a195e" }, "MBartForConditionalGeneration": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForConditionalGeneration", "TFMBartForConditionalGeneration" ], "sha": "171e918962d6c0ee56c6b070858e19e16c8dd09f" }, "MBartForQuestionAnswering": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ 
"MBartForQuestionAnswering" ], "sha": "1ee08565d24777335595e0d2940e454abdcff731" }, "MBartForSequenceClassification": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartForSequenceClassification" ], "sha": "53e9c88ecfa2475d27afe099ffa7a8bcdb7ef7e4" }, "MBartModel": { "tokenizer_classes": [ "MBartTokenizer", "MBartTokenizerFast" ], "processor_classes": [], "model_classes": [ "MBartModel", "TFMBartModel" ], "sha": "2d492b34d69dd63b411990d5c8bb692fd637e91c" }, "MCTCTForCTC": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTForCTC" ], "sha": "895a3d74f87b344b1f0a71eae4f085941d51b5cf" }, "MCTCTModel": { "tokenizer_classes": [], "processor_classes": [ "MCTCTFeatureExtractor" ], "model_classes": [ "MCTCTModel" ], "sha": "ce73d5c2b6fe163de778697d7b0543bf00d7ffa8" }, "MPNetForMaskedLM": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMaskedLM", "TFMPNetForMaskedLM" ], "sha": "50af96e7d0202aef86e396c136e4c4fde8afe183" }, "MPNetForMultipleChoice": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForMultipleChoice", "TFMPNetForMultipleChoice" ], "sha": "af4ff8bf296a3a51f5ab6cd9f56741e4c732487c" }, "MPNetForQuestionAnswering": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForQuestionAnswering", "TFMPNetForQuestionAnswering" ], "sha": "3e1a25c0d3243f78f81580c312ada3b39c06b428" }, "MPNetForSequenceClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForSequenceClassification", "TFMPNetForSequenceClassification" ], "sha": "43da45c0a0d73c5a5567b4c7ec512ec5023e52dd" }, "MPNetForTokenClassification": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetForTokenClassification", "TFMPNetForTokenClassification" ], "sha": "4e825eff24df533321ebab823eb66ce67e4ab3d9" }, "MPNetModel": { "tokenizer_classes": [ "MPNetTokenizer", "MPNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "MPNetModel", "TFMPNetModel" ], "sha": "847c68344c2922e9a71fa8835b87a0f6f72b9f47" }, "MarianForCausalLM": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [], "sha": "5fb205e6db8e18e3c6cdd4e4709be292ba4599f3" }, "MarianMTModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianMTModel", "TFMarianMTModel" ], "sha": "0405f542b31561592231a86e3009d05256cbf49f" }, "MarianModel": { "tokenizer_classes": [ "MarianTokenizer" ], "processor_classes": [], "model_classes": [ "MarianModel", "TFMarianModel" ], "sha": "3649748c0286c6d5179a7013a716f7314db182a8" }, "MarkupLMForQuestionAnswering": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForQuestionAnswering" ], "sha": "c8bb9f93591d980362547b0bdca9f23ace2f383e" }, "MarkupLMForSequenceClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForSequenceClassification" ], "sha": "c2cb7245d68d76e0a5f993fc8a3de099ecebc68b" }, "MarkupLMForTokenClassification": { "tokenizer_classes": [ "MarkupLMTokenizer", 
"MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMForTokenClassification" ], "sha": "b9f924e82f400de0b34b46ee4ba276d686bd4890" }, "MarkupLMModel": { "tokenizer_classes": [ "MarkupLMTokenizer", "MarkupLMTokenizerFast" ], "processor_classes": [ "MarkupLMFeatureExtractor" ], "model_classes": [ "MarkupLMModel" ], "sha": "9687ba29f1c59d978e3d4b0fa702031f88eff53b" }, "Mask2FormerForUniversalSegmentation": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerForUniversalSegmentation" ], "sha": "6429a7349527c9ef140ae691b83c47702cce1bc0" }, "Mask2FormerModel": { "tokenizer_classes": [], "processor_classes": [ "Mask2FormerImageProcessor" ], "model_classes": [ "Mask2FormerModel" ], "sha": "9bee8709204024b3669d503cdfe8890182f2a075" }, "MaskFormerForInstanceSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerForInstanceSegmentation" ], "sha": "f844aaa81f55cb199c115f1bf95c217a70685570" }, "MaskFormerModel": { "tokenizer_classes": [], "processor_classes": [ "MaskFormerImageProcessor" ], "model_classes": [ "MaskFormerModel" ], "sha": "473b54a464bc0ccee29bc23b4f6610f32eec05af" }, "MegaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForCausalLM" ], "sha": "6642b9da860f8b62abcfb0660feabcebf6698418" }, "MegaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMaskedLM" ], "sha": "6b2d47ba03bec9e6f7eefdd4a67351fa191aae6f" }, "MegaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForMultipleChoice" ], "sha": "2b1e751da36a4410473eef07a62b09227a26d504" }, "MegaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForQuestionAnswering" ], "sha": "612acd9a53c351c42514adb3c04f2057d2870be7" }, "MegaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForSequenceClassification" ], "sha": "4871572da1613b7e9cfd3640c6d1129af004eefb" }, "MegaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaForTokenClassification" ], "sha": "450d3722c3b995215d06b9c12544c99f958581c7" }, "MegaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegaModel" ], "sha": "ca0862db27428893fe22f9bb5d2eb0875c2156f3" }, "MegatronBertForCausalLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForCausalLM" ], "sha": "ff08d05ef8f98fdccf1f01560ec6ec4adbc8a3e3" }, "MegatronBertForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMaskedLM" ], "sha": "2ed25e2681d26b51b404ef1347a385c5f2c86a9a" }, "MegatronBertForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForMultipleChoice" ], "sha": "1485af4b75f8f234d2b4b5aea50ab2ec55223a15" }, "MegatronBertForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", 
"BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForNextSentencePrediction" ], "sha": "52bc9ee1d5145344f66b088ed278f07ed3d90584" }, "MegatronBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForPreTraining" ], "sha": "e580d0efd54e1c92789e39b32929234e36ee427f" }, "MegatronBertForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForQuestionAnswering" ], "sha": "7342ba042a3c30c15382d00fcb0521533fc43841" }, "MegatronBertForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForSequenceClassification" ], "sha": "6a7cd480511d817a1e221c8f7558c55a93baed1b" }, "MegatronBertForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertForTokenClassification" ], "sha": "8b5334b6ec5f025293ca861de474b57ca84bc005" }, "MegatronBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MegatronBertModel" ], "sha": "f2457fbe535ba97ea13db049f53618b42e13f047" }, "MgpstrForSceneTextRecognition": { "tokenizer_classes": [], "processor_classes": [ "MgpstrProcessor" ], "model_classes": [ "MgpstrForSceneTextRecognition" ], "sha": "f197d5bfa1fe27b5f28a6e6d4e3ad229b753450a" }, "MistralForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForCausalLM" ], "sha": "f7e06aeedbba8f4f665b438b868ed932d451f64b" }, "MistralForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralForSequenceClassification" ], "sha": "65045444ea1933309270d8b08b21d3fa94a84290" }, "MistralModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MistralModel" ], "sha": "becd727ad72b1e8a7c0fa0ea39b61904fa68aeac" }, "MobileBertForMaskedLM": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMaskedLM", "TFMobileBertForMaskedLM" ], "sha": "d689e737d73ad23aed3aabd3177591fc827d1c62" }, "MobileBertForMultipleChoice": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForMultipleChoice", "TFMobileBertForMultipleChoice" ], "sha": "403d1f88be7eb0c769ff3a8e57eab21cc3e75afb" }, "MobileBertForNextSentencePrediction": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForNextSentencePrediction", "TFMobileBertForNextSentencePrediction" ], "sha": "b4d8836a0f259ee3bca9f230093836c9117c5e4d" }, "MobileBertForPreTraining": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForPreTraining", "TFMobileBertForPreTraining" ], "sha": "fbaa13ea6f9fcebb9fde620dd009d12510440d17" }, "MobileBertForQuestionAnswering": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForQuestionAnswering", "TFMobileBertForQuestionAnswering" ], "sha": "ba6a55cf2daec55bfb220c9bab0bc4ad96510087" }, 
"MobileBertForSequenceClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForSequenceClassification", "TFMobileBertForSequenceClassification" ], "sha": "17ab35603bec351457e035eef2d0426538071f72" }, "MobileBertForTokenClassification": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertForTokenClassification", "TFMobileBertForTokenClassification" ], "sha": "dee83e820e6c4f069886a5d1875bf6775897313e" }, "MobileBertModel": { "tokenizer_classes": [ "MobileBertTokenizer", "MobileBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "MobileBertModel", "TFMobileBertModel" ], "sha": "09b2db33ea798a762eeaf7e727e95f9ea8a6d14f" }, "MobileNetV1ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1ForImageClassification" ], "sha": "55023dbd0935f147bf1bccf960cea01ca07e0f0c" }, "MobileNetV1Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV1ImageProcessor" ], "model_classes": [ "MobileNetV1Model" ], "sha": "178bd24528147a028938d6ee5c7e65c969ea37b0" }, "MobileNetV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForImageClassification" ], "sha": "ff907f740cf9ea91bc3cdf403a94ae28fbb2548a" }, "MobileNetV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2ForSemanticSegmentation" ], "sha": "48adbc340e42882f52b54d4f5dd045e16e9ef2d6" }, "MobileNetV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileNetV2ImageProcessor" ], "model_classes": [ "MobileNetV2Model" ], "sha": "e876885828825472a80ef1796d89d60b901813ba" }, "MobileViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForImageClassification", "TFMobileViTForImageClassification" ], "sha": "7d0b31864f856e00f9e34e8c6781dcc7a8cdaf1e" }, "MobileViTForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTForSemanticSegmentation", "TFMobileViTForSemanticSegmentation" ], "sha": "215f727caa3c3fc94fa4df486aa706e5d99d4194" }, "MobileViTModel": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTModel", "TFMobileViTModel" ], "sha": "b3a1452e7cb44b600b21ee14f3d5382366855a46" }, "MobileViTV2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForImageClassification" ], "sha": "25752b0967ad594341d1b685401450d7f698433c" }, "MobileViTV2ForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2ForSemanticSegmentation" ], "sha": "13b953f50be33219d55a12f1098be38b88000897" }, "MobileViTV2Model": { "tokenizer_classes": [], "processor_classes": [ "MobileViTImageProcessor" ], "model_classes": [ "MobileViTV2Model" ], "sha": "2f46357659db2d6d54d870e28073deeea1c8cb64" }, "MptForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForCausalLM" ], "sha": "500c869b956c65f6b1a7b4867727f124c6f5728a" }, "MptForQuestionAnswering": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], 
"processor_classes": [], "model_classes": [ "MptForQuestionAnswering" ], "sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9" }, "MptForSequenceClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForSequenceClassification" ], "sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a" }, "MptForTokenClassification": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptForTokenClassification" ], "sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246" }, "MptModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "MptModel" ], "sha": "ea747f234556661b0c8b84a626f267066ce586bf" }, "MraForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMaskedLM" ], "sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c" }, "MraForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForMultipleChoice" ], "sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74" }, "MraForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForQuestionAnswering" ], "sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0" }, "MraForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForSequenceClassification" ], "sha": "f47672d3708508bda7774215bee44a92ec16ab2f" }, "MraForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraForTokenClassification" ], "sha": "f0961ab5818bca473607fb94b391c186dc1d3492" }, "MraModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "MraModel" ], "sha": "315f34f30bcc4b0b66b11987726df2a80c50e271" }, "MusicgenForCausalLM": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "f67d387eaaa7c71ddf88af95eda4bf14ace08d49" }, "MusicgenForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "MusicgenForConditionalGeneration" ], "sha": "16102cdf580e70cf0b4e0e2cda5bc75b934da92c" }, "MvpForCausalLM": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForCausalLM" ], "sha": "105e5f2c8a0f20d404cb71795539cda5dd49716d" }, "MvpForConditionalGeneration": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForConditionalGeneration" ], "sha": "b0b706f14b2f8aae288cba30ae0064e0be7e888b" }, "MvpForQuestionAnswering": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForQuestionAnswering" ], "sha": "82f152b36a40a4c22edcb146e6eaec636d84fa2d" }, "MvpForSequenceClassification": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpForSequenceClassification" ], "sha": "506b68544d064001929ee9e6db3752e62972a6aa" }, "MvpModel": { "tokenizer_classes": [ "MvpTokenizer", "MvpTokenizerFast" ], "processor_classes": [], "model_classes": [ "MvpModel" ], "sha": "3f4653184721a2bc029b27706d335ef7ddd219d5" }, "NatBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], 
"model_classes": [ "NatBackbone" ], "sha": "d5cc5eccba4da609c82e9f5c649301b9f9fee9fb" }, "NatForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatForImageClassification" ], "sha": "2ff4c9e73c49c392c02a467e87b5511fd924242a" }, "NatModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "NatModel" ], "sha": "75e9756bb94d0ccdce98a8e963eeecbc66f9d573" }, "NezhaForMaskedLM": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMaskedLM" ], "sha": "5991cca4b78f0ed7299259a71f3eeed3f3452b72" }, "NezhaForMultipleChoice": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForMultipleChoice" ], "sha": "0f6e9ec791d85ad4503acdec50b3a120f984016b" }, "NezhaForNextSentencePrediction": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForNextSentencePrediction" ], "sha": "9a34316c14ec8ecc98ff08e46760915c80098a57" }, "NezhaForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForPreTraining" ], "sha": "6259db427a0073061de352ea819d38a74798edd7" }, "NezhaForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForQuestionAnswering" ], "sha": "31c6a34e85ae8c41294e0f4ef25044e00e511c4d" }, "NezhaForSequenceClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForSequenceClassification" ], "sha": "db057c308ba2e05f223404de11e1816ce4bd62a9" }, "NezhaForTokenClassification": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaForTokenClassification" ], "sha": "235f4e10b4a59709650c2bece3e342ec153d9cfc" }, "NezhaModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NezhaModel" ], "sha": "80e05ba7c55bcdd7f4d1387ef9a09a7a8e95b5ac" }, "NllbMoeForConditionalGeneration": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeForConditionalGeneration" ], "sha": "2a7f87dffe826af3d52086888f3f3773246e5528" }, "NllbMoeModel": { "tokenizer_classes": [ "NllbTokenizer", "NllbTokenizerFast" ], "processor_classes": [], "model_classes": [ "NllbMoeModel" ], "sha": "9f7a2261eed4658e1aa5623be4672ba64bee7da5" }, "NystromformerForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMaskedLM" ], "sha": "37036847783f1e65e81ecd43803270a1ecb276f3" }, "NystromformerForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForMultipleChoice" ], "sha": "42a077d5ab6830e20560466eaccc525eff10c3ae" }, "NystromformerForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForQuestionAnswering" ], "sha": "1cfaf79051731824db4f09989f093f87f4fceec5" }, "NystromformerForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForSequenceClassification" ], "sha": 
"d75231203066df41e9b6b25dbee9ad40e8515c18" }, "NystromformerForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerForTokenClassification" ], "sha": "5a499dc96e106bf41fc9166f2ad06527ec7ca14e" }, "NystromformerModel": { "tokenizer_classes": [ "AlbertTokenizer", "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "NystromformerModel" ], "sha": "2b6adb37ec473b15d71e2eb459acea08df6940ce" }, "OPTForCausalLM": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForCausalLM", "TFOPTForCausalLM" ], "sha": "190d1f4fc0011d2eaeaa05282e0fbd2445e4b11f" }, "OPTForQuestionAnswering": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForQuestionAnswering" ], "sha": "0fa9277ce10dbc3d0922b354befb684a136af00b" }, "OPTForSequenceClassification": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTForSequenceClassification" ], "sha": "784ab288ab7280b1853ee400ef10ee2a965df352" }, "OPTModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [], "model_classes": [ "OPTModel", "TFOPTModel" ], "sha": "901d92b8f51edb0ec9614cb185fb66a8b5d364c3" }, "OneFormerForUniversalSegmentation": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerForUniversalSegmentation" ], "sha": "fee1cfd676acc40f09017702ddac6504f3090d14" }, "OneFormerModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OneFormerImageProcessor" ], "model_classes": [ "OneFormerModel" ], "sha": "4163a79328c78f93ec57942598698a138c19a577" }, "OpenAIGPTForSequenceClassification": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTForSequenceClassification", "TFOpenAIGPTForSequenceClassification" ], "sha": "c513f7f952935085f7573bf70a1ac3ad8f33434c" }, "OpenAIGPTLMHeadModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTLMHeadModel", "TFOpenAIGPTLMHeadModel" ], "sha": "33f59ecd860f7a998483ec7631fe32d257235461" }, "OpenAIGPTModel": { "tokenizer_classes": [ "OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" ], "processor_classes": [], "model_classes": [ "OpenAIGPTModel", "TFOpenAIGPTModel" ], "sha": "00f6ec0a3a5276af71d08a26199e0ccbf2556fc9" }, "OwlViTForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTForObjectDetection" ], "sha": "af958c9164f23d0f12921a8edf687f9aaa6af90e" }, "OwlViTModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "OwlViTImageProcessor" ], "model_classes": [ "OwlViTModel" ], "sha": "f0e27b2b4e53ba70e05d13dcfea8e85272b292a5" }, "Owlv2ForObjectDetection": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2ForObjectDetection" ], "sha": "30439c0b2749726468dc13a755261e8101170052" }, "Owlv2Model": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "Owlv2ImageProcessor" ], "model_classes": [ "Owlv2Model" ], "sha": "7aeebdad5f72b36cb07c74355afad8e6052e2377" }, 
"PLBartForCausalLM": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForCausalLM" ], "sha": "6ee51133246dbdb18fc3681ebd62d21e421b9bb4" }, "PLBartForConditionalGeneration": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForConditionalGeneration" ], "sha": "ba191d28f4678d20b4dfed5fca5944018282cf20" }, "PLBartForSequenceClassification": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartForSequenceClassification" ], "sha": "02063b3d9707fcff619a4e37a0d6e58f76e39b18" }, "PLBartModel": { "tokenizer_classes": [ "PLBartTokenizer" ], "processor_classes": [], "model_classes": [ "PLBartModel" ], "sha": "cfbba29169b3f40d800403fc1b53982e1f88c5f8" }, "PegasusForCausalLM": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForCausalLM" ], "sha": "6e685a698302a3ba33e5379d3a37eb0bc1ae2f70" }, "PegasusForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusForConditionalGeneration", "TFPegasusForConditionalGeneration" ], "sha": "15e58ee2ebc14b6e80ef2891259057ee5f049be2" }, "PegasusModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusModel", "TFPegasusModel" ], "sha": "fa36b24523db411ef77903453346b8be81ef73fe" }, "PegasusXForConditionalGeneration": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXForConditionalGeneration" ], "sha": "7588a8120f26a36c1687c14bdf1e9f9656891c1a" }, "PegasusXModel": { "tokenizer_classes": [ "PegasusTokenizer", "PegasusTokenizerFast" ], "processor_classes": [], "model_classes": [ "PegasusXModel" ], "sha": "a0bdff627416ac3c39c22d081f5d88d8b8fd99cc" }, "PerceiverForImageClassificationConvProcessing": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationConvProcessing" ], "sha": "2c1e5e62ebc9d0c931adc8c665fb05bde6c1c1f1" }, "PerceiverForImageClassificationFourier": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationFourier" ], "sha": "88da41b8851b76b8be0dacdb3de023db02bb031a" }, "PerceiverForImageClassificationLearned": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForImageClassificationLearned" ], "sha": "879bd1fa38d3baddb027bb2cacba2d160a741375" }, "PerceiverForMaskedLM": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForMaskedLM" ], "sha": "1d2459cbd281ef72da5682e65102aaca96183045" }, "PerceiverForSequenceClassification": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverForSequenceClassification" ], "sha": "576f1f96348f0343458499fbf53d4102b5c0f2ff" }, "PerceiverModel": { "tokenizer_classes": [ "PerceiverTokenizer" ], "processor_classes": [ "PerceiverImageProcessor" ], "model_classes": [ "PerceiverModel" ], "sha": "83ec4d2d61ed62525ee033e13d144817beb29d19" }, "PersimmonForCausalLM": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], 
"model_classes": [ "PersimmonForCausalLM" ], "sha": "454234d6496c3857f5bf3eafb784616e2cd3ea82" }, "PersimmonForSequenceClassification": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonForSequenceClassification" ], "sha": "1d2674846543a181ca67bafa8b8f3a48bd2eefd1" }, "PersimmonModel": { "tokenizer_classes": [ "LlamaTokenizer", "LlamaTokenizerFast" ], "processor_classes": [], "model_classes": [ "PersimmonModel" ], "sha": "b8c8d479e29e9ee048e2d0b05b001ac835ad8859" }, "PhiForCausalLM": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForCausalLM" ], "sha": "3fecc0109a4a3a230e3a5509eaf47a26eba85d79" }, "PhiForSequenceClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForSequenceClassification" ], "sha": "e1c9f8ebf1317516acc1cd6338de71a53e770245" }, "PhiForTokenClassification": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiForTokenClassification" ], "sha": "d3a8054903753b5c96c05eaf9877905a116a1d5e" }, "PhiModel": { "tokenizer_classes": [ "CodeGenTokenizer", "CodeGenTokenizerFast" ], "processor_classes": [], "model_classes": [ "PhiModel" ], "sha": "99c38d5ce7ace35127d00ed3eeb3561308ea6b21" }, "Pix2StructForConditionalGeneration": { "tokenizer_classes": [ "T5TokenizerFast" ], "processor_classes": [ "Pix2StructImageProcessor", "Pix2StructProcessor" ], "model_classes": [ "Pix2StructForConditionalGeneration" ], "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb" }, "PoolFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerForImageClassification" ], "sha": "ef04de5a6896100d457fb9553dd9789c09cca98e" }, "PoolFormerModel": { "tokenizer_classes": [], "processor_classes": [ "PoolFormerImageProcessor" ], "model_classes": [ "PoolFormerModel" ], "sha": "e8037215ebdbf795329ef6525cdc6aa547f04ace" }, "ProphetNetForCausalLM": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForCausalLM" ], "sha": "d40b1e75bbc5ea0839563457aff6eee5bc0bb03e" }, "ProphetNetForConditionalGeneration": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetForConditionalGeneration" ], "sha": "d842875c41278032af39c03c66902786bb5ff2c7" }, "ProphetNetModel": { "tokenizer_classes": [ "ProphetNetTokenizer" ], "processor_classes": [], "model_classes": [ "ProphetNetModel" ], "sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937" }, "PvtForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtForImageClassification" ], "sha": "589b37bd6941aff6dd248259f9eee3c422a41fde" }, "PvtModel": { "tokenizer_classes": [], "processor_classes": [ "PvtImageProcessor" ], "model_classes": [ "PvtModel" ], "sha": "c40765c382515ae627652d60e9077b6478448d48" }, "ReformerForMaskedLM": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForMaskedLM" ], "sha": "1e6431e42c676b525e3215e9e3cc8f1404f9f82b" }, "ReformerForQuestionAnswering": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForQuestionAnswering" ], "sha": "62b43977f244474bd6982c6327d0c57310258fcd" }, 
"ReformerForSequenceClassification": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerForSequenceClassification" ], "sha": "67bd534a990a7dcfa02406987e7f066caa2a30e8" }, "ReformerModel": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [ "ReformerModel" ], "sha": "a34ddb1389067448e9bc1323de674951cfb4cff1" }, "ReformerModelWithLMHead": { "tokenizer_classes": [ "ReformerTokenizer", "ReformerTokenizerFast" ], "processor_classes": [], "model_classes": [], "sha": "e7a8addaea8407d4c55e144e48aee04be6cca618" }, "RegNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetForImageClassification", "TFRegNetForImageClassification" ], "sha": "5ec67c84fc7944c0c5b386bd26820bc4d1f3b32a" }, "RegNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "RegNetModel", "TFRegNetModel" ], "sha": "72375e1401dc8271d4abb6295c9cee376f7b8f1a" }, "RemBertForCausalLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForCausalLM", "TFRemBertForCausalLM" ], "sha": "8d9ae3d74a0e0a8958b4ee8c9dca3632abf52ef9" }, "RemBertForMaskedLM": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMaskedLM", "TFRemBertForMaskedLM" ], "sha": "b7c27d01e1cc3bef9ddd6a78627d700b3bffd759" }, "RemBertForMultipleChoice": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForMultipleChoice", "TFRemBertForMultipleChoice" ], "sha": "2fe192677b9740cf24dd559339d46925e8ac23d4" }, "RemBertForQuestionAnswering": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForQuestionAnswering", "TFRemBertForQuestionAnswering" ], "sha": "22b8ba44681b96292a1cf7f6df4ba6bb7937ec6e" }, "RemBertForSequenceClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForSequenceClassification", "TFRemBertForSequenceClassification" ], "sha": "20f3e89341ea15266d2685a8798142fba03c3f98" }, "RemBertForTokenClassification": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertForTokenClassification", "TFRemBertForTokenClassification" ], "sha": "15712ff753708da3cf0550e76e73a5d0bba7784e" }, "RemBertModel": { "tokenizer_classes": [ "RemBertTokenizer", "RemBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "RemBertModel", "TFRemBertModel" ], "sha": "59cc6d099b1ded0aaead8684457415b129f79e86" }, "ResNetBackbone": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetBackbone" ], "sha": "c84a6bcf8af4b6a3403dea3cf4c55965ac39f239" }, "ResNetForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetForImageClassification", "TFResNetForImageClassification" ], "sha": "34a180ad24d80811d420d7aa4fbec4a17751aaf8" }, "ResNetModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "ResNetModel", "TFResNetModel" ], "sha": "fafa6cdf9986c6cfbae360596b3574162430bcd3" }, "RoCBertForCausalLM": { "tokenizer_classes": [ 
"RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForCausalLM" ], "sha": "194d8dafc4f4142f8d31e6b4be14b55d812f923b" }, "RoCBertForMaskedLM": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMaskedLM" ], "sha": "8bc285f32f3b932dbd56ddf91b1170734d638eeb" }, "RoCBertForMultipleChoice": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForMultipleChoice" ], "sha": "bb54e5ae021d728022d34b12fee3f087d9486af9" }, "RoCBertForPreTraining": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForPreTraining" ], "sha": "86ebbd5b0bc84660ad7f505082eff19b86c137c8" }, "RoCBertForQuestionAnswering": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForQuestionAnswering" ], "sha": "1bfc2dc3d6e76170e6dca1ff32a54a0887ff28a3" }, "RoCBertForSequenceClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForSequenceClassification" ], "sha": "c329038802241f454273894128fea38b60f7c739" }, "RoCBertForTokenClassification": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertForTokenClassification" ], "sha": "afe5ec22c2ad1d9ff6e3e64c87eb7555faaa936d" }, "RoCBertModel": { "tokenizer_classes": [ "RoCBertTokenizer" ], "processor_classes": [], "model_classes": [ "RoCBertModel" ], "sha": "29de5580d5f5d3461a88673e7b4c492a9d8a67a4" }, "RoFormerForCausalLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForCausalLM", "TFRoFormerForCausalLM" ], "sha": "6e074219c6dd8f8b221bbfda64fba100f729f88d" }, "RoFormerForMaskedLM": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMaskedLM", "TFRoFormerForMaskedLM" ], "sha": "a3a4d05f9b29601553a77244f2adcf8194f9367c" }, "RoFormerForMultipleChoice": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForMultipleChoice", "TFRoFormerForMultipleChoice" ], "sha": "aca3999a1d14f09644faed44e2cdfb28ed68a3d3" }, "RoFormerForQuestionAnswering": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForQuestionAnswering", "TFRoFormerForQuestionAnswering" ], "sha": "b8a20b3a788f178b9ef64e2eb9587f693dca1b69" }, "RoFormerForSequenceClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForSequenceClassification", "TFRoFormerForSequenceClassification" ], "sha": "d092e2d5e62012bf4ec921e763b37865d6189216" }, "RoFormerForTokenClassification": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerForTokenClassification", "TFRoFormerForTokenClassification" ], "sha": "85d3a17062e1f3e0539abfe738a88203e25349b6" }, "RoFormerModel": { "tokenizer_classes": [ "RoFormerTokenizer", "RoFormerTokenizerFast" ], "processor_classes": [], "model_classes": [ "RoFormerModel", "TFRoFormerModel" ], "sha": "22e7df2f4cd66caf449f2342f63d176005afccc9" }, "RobertaForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForCausalLM", "TFRobertaForCausalLM" ], "sha": 
"5d1d24d56f9735402e50a2ea513ffde44487733e" }, "RobertaForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMaskedLM", "TFRobertaForMaskedLM" ], "sha": "b21c9daf0b3b66530bf5d45d67df5ec392b5059c" }, "RobertaForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForMultipleChoice", "TFRobertaForMultipleChoice" ], "sha": "10020d9546d4d7318f4d514fe13daaad07e6269f" }, "RobertaForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForQuestionAnswering", "TFRobertaForQuestionAnswering" ], "sha": "eea4a81306891746bac9e7715f805a2d9dbf4be7" }, "RobertaForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForSequenceClassification", "TFRobertaForSequenceClassification" ], "sha": "6a6f53fc6ab98e29ed539e76b1cb76d25a2cd720" }, "RobertaForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaForTokenClassification", "TFRobertaForTokenClassification" ], "sha": "9190044c4091eb0d98ae7638c453e24846bca5d7" }, "RobertaModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaModel", "TFRobertaModel" ], "sha": "181a0b8a7ad24500ec327ad07ddb225f0680ac0a" }, "RobertaPreLayerNormForCausalLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForCausalLM" ], "sha": "73b6d4531b41f295a5d310d7aa44736004a59865" }, "RobertaPreLayerNormForMaskedLM": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMaskedLM" ], "sha": "a61723c77e5ab7adc95285e7823a0a49b99af395" }, "RobertaPreLayerNormForMultipleChoice": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForMultipleChoice" ], "sha": "3dcfa62e0771358c60232a18135bfe7c7f6d715e" }, "RobertaPreLayerNormForQuestionAnswering": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForQuestionAnswering" ], "sha": "a8e76a5a50f7df60055e5ed6a1c3af2e7d34cf01" }, "RobertaPreLayerNormForSequenceClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForSequenceClassification" ], "sha": "7509cb0286d146ef2fc6beb8867ae31b92fb1b16" }, "RobertaPreLayerNormForTokenClassification": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormForTokenClassification" ], "sha": "3ad5814ba126b41e18c1978c970e396fab6da9bf" }, "RobertaPreLayerNormModel": { "tokenizer_classes": [ "RobertaTokenizer", "RobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "RobertaPreLayerNormModel", "TFRobertaPreLayerNormModel" ], "sha": 
"4830db38fd310404c5ab70bd00684eca0bc06ca8" }, "RwkvForCausalLM": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvForCausalLM" ], "sha": "2f452fd46b39e39b1a6a95fa1d8232405bbb3e96" }, "RwkvModel": { "tokenizer_classes": [ "GPTNeoXTokenizerFast" ], "processor_classes": [], "model_classes": [ "RwkvModel" ], "sha": "88a52c9437dc3c06f65a8252490be7eb91197804" }, "SEWDForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForCTC" ], "sha": "5c7495c77ae9e0f12c0de05d3a5fb95bdcd91768" }, "SEWDForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDForSequenceClassification" ], "sha": "d6cbf1164ce1999fdaf3deeb7a6eba19a3b1f873" }, "SEWDModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWDModel" ], "sha": "dde4e02219449f149bb3403bbeae127cafaf9c79" }, "SEWForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForCTC" ], "sha": "4477c7a277059fba08772acf91cf3e3dd3cb073b" }, "SEWForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWForSequenceClassification" ], "sha": "3b90fbb1c0c3848fed18f91a0169bb297a3e6619" }, "SEWModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SEWModel" ], "sha": "0a0fbb844eeefa0dce62bd05db30a2bb91e5dc88" }, "SamModel": { "tokenizer_classes": [], "processor_classes": [ "SamImageProcessor" ], "model_classes": [ "SamModel", "TFSamModel" ], "sha": "eca8651bc84e5ac3b1b62e784b744a6bd1b82575" }, "SegformerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForImageClassification", "TFSegformerForImageClassification" ], "sha": "c566ae0ed382be4ed61ed6dacffa2ba663e9cc19" }, "SegformerForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerForSemanticSegmentation", "TFSegformerForSemanticSegmentation" ], "sha": "b73798972cdf24daafa858994713aca60e2bf90d" }, "SegformerModel": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "SegformerModel", "TFSegformerModel" ], "sha": "3d4ba8ed2bdf801e6afa855b9d77893f2b7f9e10" }, "Speech2TextForConditionalGeneration": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextForConditionalGeneration", "TFSpeech2TextForConditionalGeneration" ], "sha": "1da80293ec78762e136cf6dd64b652693f9ab364" }, "Speech2TextModel": { "tokenizer_classes": [ "Speech2TextTokenizer" ], "processor_classes": [ "Speech2TextFeatureExtractor" ], "model_classes": [ "Speech2TextModel", "TFSpeech2TextModel" ], "sha": "7c6e63bd0c15dd99ef01573d4c43f90e4920cc91" }, "SpeechEncoderDecoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "SpeechEncoderDecoderModel" ], "sha": "78602ae0857728e95de4042bdca8a31ef818890a" }, "SpeechT5ForSpeechToText": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], 
"model_classes": [ "SpeechT5ForSpeechToText" ], "sha": "d46f0a83324e5865420a27a738ef203292de3479" }, "SpeechT5ForTextToSpeech": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5ForTextToSpeech" ], "sha": "922e748d9e1ea256a8d9259782021cd3820d5924" }, "SpeechT5Model": { "tokenizer_classes": [ "SpeechT5Tokenizer" ], "processor_classes": [ "SpeechT5FeatureExtractor" ], "model_classes": [ "SpeechT5Model" ], "sha": "7b248f77ca88ffddcdb538e772f6de63a86a4f9b" }, "SplinterForPreTraining": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForPreTraining" ], "sha": "e8a94efa740f1d685fa553f49132c6f022de5389" }, "SplinterForQuestionAnswering": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterForQuestionAnswering" ], "sha": "d038b7b683face4a361ab0f474d8a5b111c44c4d" }, "SplinterModel": { "tokenizer_classes": [ "SplinterTokenizer" ], "processor_classes": [], "model_classes": [ "SplinterModel" ], "sha": "a35b13cbb7faba46dc265761bb839267eb53d248" }, "SqueezeBertForMaskedLM": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMaskedLM" ], "sha": "33ce239408c22d2c98be63c9ab4607ef9ceb6d49" }, "SqueezeBertForMultipleChoice": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForMultipleChoice" ], "sha": "7e9e666896420c7839e27dcb280981d034ba4da5" }, "SqueezeBertForQuestionAnswering": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForQuestionAnswering" ], "sha": "bceb045a9ac6eb2ded7d358ed577c6dc28ea487a" }, "SqueezeBertForSequenceClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForSequenceClassification" ], "sha": "c5aeb1f454a1d059d41a5f8dacaf784b9de0b899" }, "SqueezeBertForTokenClassification": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertForTokenClassification" ], "sha": "70ba60ca44a380e6aa983a37b163c57217219df7" }, "SqueezeBertModel": { "tokenizer_classes": [ "SqueezeBertTokenizer", "SqueezeBertTokenizerFast" ], "processor_classes": [], "model_classes": [ "SqueezeBertModel" ], "sha": "e0a3ac56a4047da3f921638252ead5e44438bbdb" }, "SwiftFormerForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerForImageClassification" ], "sha": "a249b14a525d29e675b6e4af4baacd9ba7df7598" }, "SwiftFormerModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwiftFormerModel" ], "sha": "25ba2d88c770533f8c69811d2a454a00c1d09f5d" }, "Swin2SRForImageSuperResolution": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRForImageSuperResolution" ], "sha": "3a2780de0b455084c018ac8a62b56040969e26ec" }, "Swin2SRModel": { "tokenizer_classes": [], "processor_classes": [ "Swin2SRImageProcessor" ], "model_classes": [ "Swin2SRModel" ], "sha": "c67f6ecff9ef8675c3869c987277b0a1e040f4be" }, "SwinBackbone": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinBackbone" ], "sha": 
"89b28b8ec05a7b3357be75a77eb7809e6fd5cfef" }, "SwinForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForImageClassification", "TFSwinForImageClassification" ], "sha": "e3c2e80f380ef79781313981da1a993dd8b8d34d" }, "SwinForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinForMaskedImageModeling", "TFSwinForMaskedImageModeling" ], "sha": "d84b061fbace1bc6e697e3253e222de42053f978" }, "SwinModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "SwinModel", "TFSwinModel" ], "sha": "23ff641295660ec4fea399be8aa1bc14565961f8" }, "Swinv2ForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForImageClassification" ], "sha": "3fd755cdf4cf611db83f72f9c9b00eb9257a38ca" }, "Swinv2ForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2ForMaskedImageModeling" ], "sha": "8375c31eb6231fde36ec6533a34ba5b28e296163" }, "Swinv2Model": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "Swinv2Model" ], "sha": "70aeb72e8a266f668c8b51a517ec01003b8d6804" }, "SwitchTransformersForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersForConditionalGeneration" ], "sha": "c8fcd2bb735894c78db7f1e5b51afc78aced7adb" }, "SwitchTransformersModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "SwitchTransformersModel" ], "sha": "275bbf6d389bfd0540b9f824c609c6b22a577328" }, "T5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5EncoderModel", "TFT5EncoderModel" ], "sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8" }, "T5ForConditionalGeneration": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForConditionalGeneration", "TFT5ForConditionalGeneration" ], "sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29" }, "T5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForQuestionAnswering" ], "sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94" }, "T5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5ForSequenceClassification" ], "sha": "105b5c4c8e1efe927444108f1388c4f102ebad15" }, "T5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "T5Model", "TFT5Model" ], "sha": "eb3d20dda0ba77c1de618d78116a1a0c784c515c" }, "TableTransformerForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerForObjectDetection" ], "sha": "9cf1e3f5c3555a727672a32b49f8b96c5aa20be6" }, "TableTransformerModel": { "tokenizer_classes": [], "processor_classes": [ "DetrImageProcessor" ], "model_classes": [ "TableTransformerModel" ], "sha": "7b446244d8739b0c29d98f7d537b15ad578577d5" }, "TapasForMaskedLM": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForMaskedLM", "TapasForMaskedLM" ], "sha": "2cedb92dd9a3dc37ffb7d35ad5190b110992577c" }, "TapasForQuestionAnswering": { "tokenizer_classes": 
[ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForQuestionAnswering", "TapasForQuestionAnswering" ], "sha": "4cc91b9e5db662e6e392d8052587ae419896d72b" }, "TapasForSequenceClassification": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasForSequenceClassification", "TapasForSequenceClassification" ], "sha": "7c37bfb87a6fce2f8604bb3cab2a14e09a285e14" }, "TapasModel": { "tokenizer_classes": [ "TapasTokenizer" ], "processor_classes": [], "model_classes": [ "TFTapasModel", "TapasModel" ], "sha": "bc004af0a415afe1f566c3afe8dd4d48d08c1ce0" }, "TimesformerForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerForVideoClassification" ], "sha": "0b3b8e314618d7af34fb44477745491b44bf556d" }, "TimesformerModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "TimesformerModel" ], "sha": "ea51f7ebb6426ad2b1fa1396e83f8e8ad5bc3b44" }, "TransfoXLForSequenceClassification": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLForSequenceClassification", "TransfoXLForSequenceClassification" ], "sha": "f3d370184350667d74056b979081b0bf5b0083c1" }, "TransfoXLLMHeadModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLLMHeadModel", "TransfoXLLMHeadModel" ], "sha": "e0d4cebcdde52d8d4c81782a1edc606830bd6afd" }, "TransfoXLModel": { "tokenizer_classes": [ "TransfoXLTokenizer" ], "processor_classes": [], "model_classes": [ "TFTransfoXLModel", "TransfoXLModel" ], "sha": "6938eeae35662a862accb01412dfc486454bdc8f" }, "TvltForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltForPreTraining" ], "sha": "f7bd2833764eb6d55a921aaed81d3f21119016ae" }, "TvltModel": { "tokenizer_classes": [], "processor_classes": [ "TvltProcessor" ], "model_classes": [ "TvltModel" ], "sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3" }, "UMT5EncoderModel": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5EncoderModel" ], "sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0" }, "UMT5ForQuestionAnswering": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForQuestionAnswering" ], "sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093" }, "UMT5ForSequenceClassification": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5ForSequenceClassification" ], "sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14" }, "UMT5Model": { "tokenizer_classes": [ "T5Tokenizer", "T5TokenizerFast" ], "processor_classes": [], "model_classes": [ "UMT5Model" ], "sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd" }, "UniSpeechForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForCTC" ], "sha": "102b56d76f4d74cface309801c0ad80892583751" }, "UniSpeechForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForPreTraining" ], "sha": "830be5b3e85aaae7bcc961218e417c29743d6042" }, "UniSpeechForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechForSequenceClassification" 
], "sha": "a30ac1516944757ccd8efcbcf94033a03f8708bf" }, "UniSpeechModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechModel" ], "sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d" }, "UniSpeechSatForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForAudioFrameClassification" ], "sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57" }, "UniSpeechSatForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForCTC" ], "sha": "a8617538d3a2ae990f022bb0c36b8428a4870822" }, "UniSpeechSatForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForPreTraining" ], "sha": "a772f66db0ab49e1050e524d7fcbe5106ebdaf96" }, "UniSpeechSatForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForSequenceClassification" ], "sha": "f1c16567bd829a6d8a7a2d167d22e9653149e625" }, "UniSpeechSatForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatForXVector" ], "sha": "71cb3780cf3678f74fba00e19df82df76dca6133" }, "UniSpeechSatModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "UniSpeechSatModel" ], "sha": "ea755bbc7c6c6aa649c58b4b000f243acbbd6b5a" }, "UperNetForSemanticSegmentation": { "tokenizer_classes": [], "processor_classes": [ "SegformerImageProcessor" ], "model_classes": [ "UperNetForSemanticSegmentation" ], "sha": "f1871cb388bc0b203f5397bfc06a373736c2fb9c" }, "VanForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanForImageClassification" ], "sha": "694eb147bc4768aeabeffbfb97732281b71a621d" }, "VanModel": { "tokenizer_classes": [], "processor_classes": [ "ConvNextImageProcessor" ], "model_classes": [ "VanModel" ], "sha": "d8ac60ce952020f2b0355fc566d634b2c5ba635d" }, "ViTForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTForImageClassification", "ViTForImageClassification" ], "sha": "5b3b44a3ed492070c273e481e30ecf4deddc5ec3" }, "ViTForMaskedImageModeling": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTForMaskedImageModeling" ], "sha": "d984e0b432fe195c2c26952d4f249031e7b1e2ea" }, "ViTHybridForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridForImageClassification" ], "sha": "69c7c396032ffe60d54953b584394899fb95ccc1" }, "ViTHybridModel": { "tokenizer_classes": [], "processor_classes": [ "ViTHybridImageProcessor" ], "model_classes": [ "ViTHybridModel" ], "sha": "077443bfefe40d625314dbd274d2ff8089624797" }, "ViTMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEForPreTraining", "ViTMAEForPreTraining" ], "sha": "2d98d80d9c45eef0d5b6f5426d7196bb546fe9fc" }, "ViTMAEModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTMAEModel", "ViTMAEModel" ], "sha": 
"c7c2f12c19d2dbec08851a9dac7485909629a5fd" }, "ViTMSNForImageClassification": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNForImageClassification" ], "sha": "feda819aa7dbb55d850130f4cf1d210858d7eb89" }, "ViTMSNModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "ViTMSNModel" ], "sha": "0733abf168cb47a149821fdd2113d546e15c47de" }, "ViTModel": { "tokenizer_classes": [], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFViTModel", "ViTModel" ], "sha": "31817b7a64ebc3333fcd4801dfbb356ab07b13dd" }, "VideoMAEForPreTraining": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForPreTraining" ], "sha": "9de66c4bb759dc7269a7af17bf70b3194550acaa" }, "VideoMAEForVideoClassification": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEForVideoClassification" ], "sha": "d3f743408386bc0ffe2d979de35335e87bc34aec" }, "VideoMAEModel": { "tokenizer_classes": [], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "VideoMAEModel" ], "sha": "a2be96beba888817d92b67525601569d830342ff" }, "ViltForQuestionAnswering": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltForQuestionAnswering" ], "sha": "faeffbf43da6621717d8b13e7ebe87d58d750cb2" }, "ViltModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViltImageProcessor" ], "model_classes": [ "ViltModel" ], "sha": "3a89b7b5782947c4f4125162ffe1c9cc18c9c800" }, "VisionEncoderDecoderModel": { "tokenizer_classes": [ "GPT2Tokenizer", "GPT2TokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionEncoderDecoderModel", "VisionEncoderDecoderModel" ], "sha": "23917761070cf16b26a6d033b6bff9100bbc618b" }, "VisionTextDualEncoderModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [ "ViTImageProcessor" ], "model_classes": [ "TFVisionTextDualEncoderModel", "VisionTextDualEncoderModel" ], "sha": "c3569ef17f66acbacb76f7ceb6f71e02d075dd6c" }, "VisualBertForPreTraining": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertForPreTraining" ], "sha": "ce5a4d93ce762971cd216cda9aef8b9ce3f0450b" }, "VisualBertModel": { "tokenizer_classes": [ "BertTokenizer", "BertTokenizerFast" ], "processor_classes": [], "model_classes": [ "VisualBertModel" ], "sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa" }, "VitsModel": { "tokenizer_classes": [ "VitsTokenizer" ], "processor_classes": [], "model_classes": [ "VitsModel" ], "sha": "b9a20ca5b6a7874576e485850260578895587dd2" }, "Wav2Vec2ConformerForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForAudioFrameClassification" ], "sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56" }, "Wav2Vec2ConformerForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForCTC" ], "sha": "a2ecb2985fcbb9f3ed000c12c1af6da36f5eaa3a" }, "Wav2Vec2ConformerForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForPreTraining" ], "sha": 
"099279b69e5da19efb05589804ccee210a0e57ae" }, "Wav2Vec2ConformerForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForSequenceClassification" ], "sha": "e8c1bca543c54bf15a6c026cb3761993b52cf617" }, "Wav2Vec2ConformerForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerForXVector" ], "sha": "ba206a55998f16e134960728bd02006eaf39114f" }, "Wav2Vec2ConformerModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ConformerModel" ], "sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994" }, "Wav2Vec2ForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForAudioFrameClassification" ], "sha": "ab219f119e10f56e1059966c66d23f0df3c2c343" }, "Wav2Vec2ForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForCTC" ], "sha": "6245fbb1cb99cea5c4de1e73f81fba978fb275ac" }, "Wav2Vec2ForMaskedLM": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForMaskedLM" ], "sha": "e083cf4fefec4df3c241dbbe5e17a84a794a89bd" }, "Wav2Vec2ForPreTraining": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForPreTraining" ], "sha": "a8d71e216334260353ccbf5ce84cd6924f7457da" }, "Wav2Vec2ForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2ForSequenceClassification", "Wav2Vec2ForSequenceClassification" ], "sha": "2000b2022abcc37100241485f5872126b70164c9" }, "Wav2Vec2ForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "Wav2Vec2ForXVector" ], "sha": "f4c422db53aae061ea609f4407af7cd5b33c8942" }, "Wav2Vec2Model": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "TFWav2Vec2Model", "Wav2Vec2Model" ], "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37" }, "WavLMForAudioFrameClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForAudioFrameClassification" ], "sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec" }, "WavLMForCTC": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForCTC" ], "sha": "f1139c5ddf34d2327ae1f6917edd7da180b06971" }, "WavLMForSequenceClassification": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForSequenceClassification" ], "sha": "4ba5f2019b46866ce2011c993194ebda60afc028" }, "WavLMForXVector": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMForXVector" ], "sha": "faf9264eac56a56d5510a0984d7e1146e4c8cf62" }, "WavLMModel": { "tokenizer_classes": [ "Wav2Vec2CTCTokenizer" ], "processor_classes": [ "Wav2Vec2FeatureExtractor" ], "model_classes": [ "WavLMModel" ], "sha": 
"e932275e37cb643be271f655bd1d649f4f4b4bd5" }, "WhisperForAudioClassification": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForAudioClassification" ], "sha": "d71b13674b1a67443cd19d0594a3b5b1e5968f0d" }, "WhisperForCausalLM": { "tokenizer_classes": [ "WhisperTokenizer" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "WhisperForCausalLM" ], "sha": "e7febfd7f4512e029293c677e6d2633e23fc459a" }, "WhisperForConditionalGeneration": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperForConditionalGeneration", "WhisperForConditionalGeneration" ], "sha": "598101b885b24508042d9292e54aa04bff96318e" }, "WhisperModel": { "tokenizer_classes": [ "WhisperTokenizer", "WhisperTokenizerFast" ], "processor_classes": [ "WhisperFeatureExtractor" ], "model_classes": [ "TFWhisperModel", "WhisperModel" ], "sha": "c04c50216bb6b0a8f4d55f2fa9f9f4cf61c8a77c" }, "XCLIPModel": { "tokenizer_classes": [ "CLIPTokenizer", "CLIPTokenizerFast" ], "processor_classes": [ "VideoMAEImageProcessor" ], "model_classes": [ "XCLIPModel" ], "sha": "299ffffc6b94c3558bf7dbc38e24074c99490046" }, "XGLMForCausalLM": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMForCausalLM", "XGLMForCausalLM" ], "sha": "d5381ce297c249d559937c6bb6316cf1fdad2613" }, "XGLMModel": { "tokenizer_classes": [ "XGLMTokenizer", "XGLMTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXGLMModel", "XGLMModel" ], "sha": "2b5cef167822cfaa558d259af1722e2f785cd3d5" }, "XLMForMultipleChoice": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForMultipleChoice", "XLMForMultipleChoice" ], "sha": "f0c8cc6462449ac9eb9b4158e433bd3c923db3af" }, "XLMForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForQuestionAnsweringSimple", "XLMForQuestionAnsweringSimple" ], "sha": "82e93a2653cf3646eaaf02d8cc5f8ff9a4551523" }, "XLMForSequenceClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForSequenceClassification", "XLMForSequenceClassification" ], "sha": "2d6892f5f703be9b481bca91477032bd0e36dbe5" }, "XLMForTokenClassification": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMForTokenClassification", "XLMForTokenClassification" ], "sha": "9a591395e7a0643a03f5d2debb98caa3966e021c" }, "XLMModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMModel", "XLMModel" ], "sha": "022b86df246414ff712475d9ca55db690ff1d3bf" }, "XLMRobertaXLForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForCausalLM" ], "sha": "fc05408e5b33a31638476ef337719dfbb7615ef3" }, "XLMRobertaXLForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMaskedLM" ], "sha": "e96f198eede757e5ae2c87632fdcfb341073ef6e" }, "XLMRobertaXLForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForMultipleChoice" ], "sha": "52732625f1bfbbb7cb4ba1cf0963de596d81822d" }, "XLMRobertaXLForQuestionAnswering": { 
"tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForQuestionAnswering" ], "sha": "da388fdd2d28e0757eb0c2b2c612a8ff03af2223" }, "XLMRobertaXLForSequenceClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForSequenceClassification" ], "sha": "980721187633bcf21ac0b8edbed933527f4611df" }, "XLMRobertaXLForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLForTokenClassification" ], "sha": "37a97280faf6fef0bd946d3934d77a1b60fbf473" }, "XLMRobertaXLModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XLMRobertaXLModel" ], "sha": "8fbeb39a984912e47f5d24a31be61639031a0fc3" }, "XLMWithLMHeadModel": { "tokenizer_classes": [ "XLMTokenizer" ], "processor_classes": [], "model_classes": [ "TFXLMWithLMHeadModel", "XLMWithLMHeadModel" ], "sha": "db70bdefbaf095e88b8097e4b601d9105a511afa" }, "XLNetForMultipleChoice": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForMultipleChoice", "XLNetForMultipleChoice" ], "sha": "8bb7e28d0cd1e93154d3232baf5e9c79acaf9f1a" }, "XLNetForQuestionAnsweringSimple": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForQuestionAnsweringSimple", "XLNetForQuestionAnsweringSimple" ], "sha": "fabd06a45d947f3d46f1b8dce2186cf3b27776dc" }, "XLNetForSequenceClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForSequenceClassification", "XLNetForSequenceClassification" ], "sha": "e3c194f24537ebf2c474ade60becb9397696edec" }, "XLNetForTokenClassification": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetForTokenClassification", "XLNetForTokenClassification" ], "sha": "16aa15029aa667046d504c4a88ceddfdd5b5fb40" }, "XLNetLMHeadModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetLMHeadModel", "XLNetLMHeadModel" ], "sha": "c9a98cc982a16ca162832a8cbea25116479bb938" }, "XLNetModel": { "tokenizer_classes": [ "XLNetTokenizer", "XLNetTokenizerFast" ], "processor_classes": [], "model_classes": [ "TFXLNetModel", "XLNetModel" ], "sha": "1d6e231942135faf32b8d9a97773d8f6c85ca561" }, "XmodForCausalLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForCausalLM" ], "sha": "c6b746071f2f067099a8fb4f57ce3c27a7e4b67d" }, "XmodForMaskedLM": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMaskedLM" ], "sha": "e1085818f4ed3c6073b2038635e5f3061208923d" }, "XmodForMultipleChoice": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForMultipleChoice" ], "sha": "c63042cdf196be3fed846421b345d439b2483f69" }, "XmodForQuestionAnswering": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForQuestionAnswering" ], "sha": "75acd3071fae9978c82618cd0f090c87aabc1f23" }, "XmodForSequenceClassification": 
{ "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForSequenceClassification" ], "sha": "523a16570be048618913ac17ccd00d343bcb5e99" }, "XmodForTokenClassification": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodForTokenClassification" ], "sha": "a0f0a02732b4579670dad11a69ae244ebd777b49" }, "XmodModel": { "tokenizer_classes": [ "XLMRobertaTokenizer", "XLMRobertaTokenizerFast" ], "processor_classes": [], "model_classes": [ "XmodModel" ], "sha": "bc286de0035450e7dcd6bcce78098a967b9c2b6c" }, "YolosForObjectDetection": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosForObjectDetection" ], "sha": "0a4aae25bfbe8b5edd4815cb00d697a6ba7d2126" }, "YolosModel": { "tokenizer_classes": [], "processor_classes": [ "YolosImageProcessor" ], "model_classes": [ "YolosModel" ], "sha": "339bc51f1914f031a550e5f95095ed4a4c22a7de" }, "YosoForMaskedLM": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMaskedLM" ], "sha": "cb291bedcbec199ea195f086e3ebea6fab026bba" }, "YosoForMultipleChoice": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForMultipleChoice" ], "sha": "cf2d3a3f0628bc9d0da68ea8de26b12016453fee" }, "YosoForQuestionAnswering": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForQuestionAnswering" ], "sha": "e8c3091f674588adfa3371b3de0427a9b39dd03f" }, "YosoForSequenceClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForSequenceClassification" ], "sha": "88132cbaa1a9a87f65b6f9813c388011377f18cf" }, "YosoForTokenClassification": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoForTokenClassification" ], "sha": "fd2219856608d3dba70dc7b1a06af629903dec31" }, "YosoModel": { "tokenizer_classes": [ "AlbertTokenizerFast" ], "processor_classes": [], "model_classes": [ "YosoModel" ], "sha": "e144d9f1fe39c21eda1177702640e126892605ce" } }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_hub_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"

GATED_REPO = "hf-internal-testing/dummy-gated-model"
README_FILE = "README.md"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This checks that we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name, which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))

    def test_get_file_gated_repo(self):
        """Test that downloading a file from a gated repo fails with the correct message when not authenticated."""
        with self.assertRaisesRegex(EnvironmentError, "You are trying to access a gated repo."):
            # All files except README.md are protected on a gated repo.
            cached_file(GATED_REPO, "gated_file.txt", token=False)

    def test_has_file_gated_repo(self):
        """Test that checking file existence on a gated repo fails with the correct message when not authenticated."""
        with self.assertRaisesRegex(EnvironmentError, "is a gated repository"):
            # All files except README.md are protected on a gated repo.
            has_file(GATED_REPO, "gated_file.txt", token=False)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_hf_argparser.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counterpart
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # JSON is a subset of YAML, so the YAML loader can also read the JSON file.
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_add_new_model_like.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import tempfile import unittest from pathlib import Path import transformers from transformers.commands.add_new_model_like import ( ModelPatterns, _re_class_func, add_content_to_file, add_content_to_text, clean_frameworks_in_init, duplicate_doc_file, duplicate_module, filter_framework_files, find_base_model_checkpoint, get_model_files, get_module_from_file, parse_module_content, replace_model_patterns, retrieve_info_for_model, retrieve_model_classes, simplify_replacements, ) from transformers.testing_utils import require_flax, require_tf, require_torch BERT_MODEL_FILES = { "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/tokenization_bert.py", "src/transformers/models/bert/tokenization_bert_fast.py", "src/transformers/models/bert/tokenization_bert_tf.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/models/bert/modeling_flax_bert.py", "src/transformers/models/bert/modeling_tf_bert.py", "src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py", "src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py", "src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py", "src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py", } VIT_MODEL_FILES = { "src/transformers/models/vit/__init__.py", "src/transformers/models/vit/configuration_vit.py", "src/transformers/models/vit/convert_dino_to_pytorch.py", "src/transformers/models/vit/convert_vit_timm_to_pytorch.py", "src/transformers/models/vit/feature_extraction_vit.py", "src/transformers/models/vit/image_processing_vit.py", "src/transformers/models/vit/modeling_vit.py", "src/transformers/models/vit/modeling_tf_vit.py", "src/transformers/models/vit/modeling_flax_vit.py", } WAV2VEC2_MODEL_FILES = { "src/transformers/models/wav2vec2/__init__.py", "src/transformers/models/wav2vec2/configuration_wav2vec2.py", "src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py", "src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py", "src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py", "src/transformers/models/wav2vec2/modeling_wav2vec2.py", "src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py", "src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py", "src/transformers/models/wav2vec2/processing_wav2vec2.py", "src/transformers/models/wav2vec2/tokenization_wav2vec2.py", } REPO_PATH = Path(transformers.__path__[0]).parent.parent @require_torch @require_tf @require_flax class TestAddNewModelLike(unittest.TestCase): def init_file(self, file_name, content): with open(file_name, "w", encoding="utf-8") as f: f.write(content) def check_result(self, file_name, expected_result): with open(file_name, "r", encoding="utf-8") as f: result = f.read() self.assertEqual(result, 
expected_result) def test_re_class_func(self): self.assertEqual(_re_class_func.search("def my_function(x, y):").groups()[0], "my_function") self.assertEqual(_re_class_func.search("class MyClass:").groups()[0], "MyClass") self.assertEqual(_re_class_func.search("class MyClass(SuperClass):").groups()[0], "MyClass") def test_model_patterns_defaults(self): model_patterns = ModelPatterns("GPT-New new", "huggingface/gpt-new-base") self.assertEqual(model_patterns.model_type, "gpt-new-new") self.assertEqual(model_patterns.model_lower_cased, "gpt_new_new") self.assertEqual(model_patterns.model_camel_cased, "GPTNewNew") self.assertEqual(model_patterns.model_upper_cased, "GPT_NEW_NEW") self.assertEqual(model_patterns.config_class, "GPTNewNewConfig") self.assertIsNone(model_patterns.tokenizer_class) self.assertIsNone(model_patterns.feature_extractor_class) self.assertIsNone(model_patterns.processor_class) def test_parse_module_content(self): test_code = """SOME_CONSTANT = a constant CONSTANT_DEFINED_ON_SEVERAL_LINES = [ first_item, second_item ] def function(args): some code # Copied from transformers.some_module class SomeClass: some code """ expected_parts = [ "SOME_CONSTANT = a constant\n", "CONSTANT_DEFINED_ON_SEVERAL_LINES = [\n first_item,\n second_item\n]", "", "def function(args):\n some code\n", "# Copied from transformers.some_module\nclass SomeClass:\n some code\n", ] self.assertEqual(parse_module_content(test_code), expected_parts) def test_add_content_to_text(self): test_text = """all_configs = { "gpt": "GPTConfig", "bert": "BertConfig", "t5": "T5Config", }""" expected = """all_configs = { "gpt": "GPTConfig", "gpt2": "GPT2Config", "bert": "BertConfig", "t5": "T5Config", }""" line = ' "gpt2": "GPT2Config",' self.assertEqual(add_content_to_text(test_text, line, add_before="bert"), expected) self.assertEqual(add_content_to_text(test_text, line, add_before="bert", exact_match=True), test_text) self.assertEqual( add_content_to_text(test_text, line, add_before=' "bert": "BertConfig",', exact_match=True), expected ) self.assertEqual(add_content_to_text(test_text, line, add_before=re.compile(r'^\s*"bert":')), expected) self.assertEqual(add_content_to_text(test_text, line, add_after="gpt"), expected) self.assertEqual(add_content_to_text(test_text, line, add_after="gpt", exact_match=True), test_text) self.assertEqual( add_content_to_text(test_text, line, add_after=' "gpt": "GPTConfig",', exact_match=True), expected ) self.assertEqual(add_content_to_text(test_text, line, add_after=re.compile(r'^\s*"gpt":')), expected) def test_add_content_to_file(self): test_text = """all_configs = { "gpt": "GPTConfig", "bert": "BertConfig", "t5": "T5Config", }""" expected = """all_configs = { "gpt": "GPTConfig", "gpt2": "GPT2Config", "bert": "BertConfig", "t5": "T5Config", }""" line = ' "gpt2": "GPT2Config",' with tempfile.TemporaryDirectory() as tmp_dir: file_name = os.path.join(tmp_dir, "code.py") self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_before="bert") self.check_result(file_name, expected) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_before="bert", exact_match=True) self.check_result(file_name, test_text) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_before=' "bert": "BertConfig",', exact_match=True) self.check_result(file_name, expected) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_before=re.compile(r'^\s*"bert":')) self.check_result(file_name, expected) self.init_file(file_name, 
test_text) add_content_to_file(file_name, line, add_after="gpt") self.check_result(file_name, expected) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_after="gpt", exact_match=True) self.check_result(file_name, test_text) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_after=' "gpt": "GPTConfig",', exact_match=True) self.check_result(file_name, expected) self.init_file(file_name, test_text) add_content_to_file(file_name, line, add_after=re.compile(r'^\s*"gpt":')) self.check_result(file_name, expected) def test_simplify_replacements(self): self.assertEqual(simplify_replacements([("Bert", "NewBert")]), [("Bert", "NewBert")]) self.assertEqual( simplify_replacements([("Bert", "NewBert"), ("bert", "new-bert")]), [("Bert", "NewBert"), ("bert", "new-bert")], ) self.assertEqual( simplify_replacements([("BertConfig", "NewBertConfig"), ("Bert", "NewBert"), ("bert", "new-bert")]), [("Bert", "NewBert"), ("bert", "new-bert")], ) def test_replace_model_patterns(self): bert_model_patterns = ModelPatterns("Bert", "bert-base-cased") new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base") bert_test = '''class TFBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" is_parallelizable = True supports_gradient_checkpointing = True model_type = "bert" BERT_CONSTANT = "value" ''' bert_expected = '''class TFNewBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NewBertConfig load_tf_weights = load_tf_weights_in_new_bert base_model_prefix = "new_bert" is_parallelizable = True supports_gradient_checkpointing = True model_type = "new-bert" NEW_BERT_CONSTANT = "value" ''' bert_converted, replacements = replace_model_patterns(bert_test, bert_model_patterns, new_bert_model_patterns) self.assertEqual(bert_converted, bert_expected) # Replacements are empty here since bert as been replaced by bert_new in some instances and bert-new # in others. self.assertEqual(replacements, "") # If we remove the model type, we will get replacements bert_test = bert_test.replace(' model_type = "bert"\n', "") bert_expected = bert_expected.replace(' model_type = "new-bert"\n', "") bert_converted, replacements = replace_model_patterns(bert_test, bert_model_patterns, new_bert_model_patterns) self.assertEqual(bert_converted, bert_expected) self.assertEqual(replacements, "BERT->NEW_BERT,Bert->NewBert,bert->new_bert") gpt_model_patterns = ModelPatterns("GPT2", "gpt2") new_gpt_model_patterns = ModelPatterns("GPT-New new", "huggingface/gpt-new-base") gpt_test = '''class GPT2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPT2Config load_tf_weights = load_tf_weights_in_gpt2 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True GPT2_CONSTANT = "value" ''' gpt_expected = '''class GPTNewNewPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GPTNewNewConfig load_tf_weights = load_tf_weights_in_gpt_new_new base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True GPT_NEW_NEW_CONSTANT = "value" ''' gpt_converted, replacements = replace_model_patterns(gpt_test, gpt_model_patterns, new_gpt_model_patterns) self.assertEqual(gpt_converted, gpt_expected) # Replacements are empty here since GPT2 as been replaced by GPTNewNew in some instances and GPT_NEW_NEW # in others. self.assertEqual(replacements, "") roberta_model_patterns = ModelPatterns("RoBERTa", "roberta-base", model_camel_cased="Roberta") new_roberta_model_patterns = ModelPatterns( "RoBERTa-New", "huggingface/roberta-new-base", model_camel_cased="RobertaNew" ) roberta_test = '''# Copied from transformers.models.bert.BertModel with Bert->Roberta class RobertaModel(RobertaPreTrainedModel): """ The base RoBERTa model. """ checkpoint = roberta-base base_model_prefix = "roberta" ''' roberta_expected = '''# Copied from transformers.models.bert.BertModel with Bert->RobertaNew class RobertaNewModel(RobertaNewPreTrainedModel): """ The base RoBERTa-New model. """ checkpoint = huggingface/roberta-new-base base_model_prefix = "roberta_new" ''' roberta_converted, replacements = replace_model_patterns( roberta_test, roberta_model_patterns, new_roberta_model_patterns ) self.assertEqual(roberta_converted, roberta_expected) def test_get_module_from_file(self): self.assertEqual( get_module_from_file("/git/transformers/src/transformers/models/bert/modeling_tf_bert.py"), "transformers.models.bert.modeling_tf_bert", ) self.assertEqual( get_module_from_file("/transformers/models/gpt2/modeling_gpt2.py"), "transformers.models.gpt2.modeling_gpt2", ) with self.assertRaises(ValueError): get_module_from_file("/models/gpt2/modeling_gpt2.py") def test_duplicate_module(self): bert_model_patterns = ModelPatterns("Bert", "bert-base-cased") new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base") bert_test = '''class TFBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" is_parallelizable = True supports_gradient_checkpointing = True BERT_CONSTANT = "value" ''' bert_expected = '''class TFNewBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = NewBertConfig load_tf_weights = load_tf_weights_in_new_bert base_model_prefix = "new_bert" is_parallelizable = True supports_gradient_checkpointing = True NEW_BERT_CONSTANT = "value" ''' bert_expected_with_copied_from = ( "# Copied from transformers.bert_module.TFBertPreTrainedModel with Bert->NewBert,bert->new_bert\n" + bert_expected ) with tempfile.TemporaryDirectory() as tmp_dir: work_dir = os.path.join(tmp_dir, "transformers") os.makedirs(work_dir) file_name = os.path.join(work_dir, "bert_module.py") dest_file_name = os.path.join(work_dir, "new_bert_module.py") self.init_file(file_name, bert_test) duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns) self.check_result(dest_file_name, bert_expected_with_copied_from) self.init_file(file_name, bert_test) duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns, add_copied_from=False) self.check_result(dest_file_name, bert_expected) def test_duplicate_module_with_copied_from(self): bert_model_patterns = ModelPatterns("Bert", "bert-base-cased") new_bert_model_patterns = ModelPatterns("New Bert", "huggingface/bert-new-base") bert_test = '''# Copied from transformers.models.xxx.XxxModel with Xxx->Bert class TFBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" is_parallelizable = True supports_gradient_checkpointing = True BERT_CONSTANT = "value" ''' bert_expected = '''# Copied from transformers.models.xxx.XxxModel with Xxx->NewBert class TFNewBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NewBertConfig load_tf_weights = load_tf_weights_in_new_bert base_model_prefix = "new_bert" is_parallelizable = True supports_gradient_checkpointing = True NEW_BERT_CONSTANT = "value" ''' with tempfile.TemporaryDirectory() as tmp_dir: work_dir = os.path.join(tmp_dir, "transformers") os.makedirs(work_dir) file_name = os.path.join(work_dir, "bert_module.py") dest_file_name = os.path.join(work_dir, "new_bert_module.py") self.init_file(file_name, bert_test) duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns) # There should not be a new Copied from statement, the old one should be adapated. 
self.check_result(dest_file_name, bert_expected) self.init_file(file_name, bert_test) duplicate_module(file_name, bert_model_patterns, new_bert_model_patterns, add_copied_from=False) self.check_result(dest_file_name, bert_expected) def test_filter_framework_files(self): files = ["modeling_bert.py", "modeling_tf_bert.py", "modeling_flax_bert.py", "configuration_bert.py"] self.assertEqual(filter_framework_files(files), files) self.assertEqual(set(filter_framework_files(files, ["pt", "tf", "flax"])), set(files)) self.assertEqual(set(filter_framework_files(files, ["pt"])), {"modeling_bert.py", "configuration_bert.py"}) self.assertEqual(set(filter_framework_files(files, ["tf"])), {"modeling_tf_bert.py", "configuration_bert.py"}) self.assertEqual( set(filter_framework_files(files, ["flax"])), {"modeling_flax_bert.py", "configuration_bert.py"} ) self.assertEqual( set(filter_framework_files(files, ["pt", "tf"])), {"modeling_tf_bert.py", "modeling_bert.py", "configuration_bert.py"}, ) self.assertEqual( set(filter_framework_files(files, ["tf", "flax"])), {"modeling_tf_bert.py", "modeling_flax_bert.py", "configuration_bert.py"}, ) self.assertEqual( set(filter_framework_files(files, ["pt", "flax"])), {"modeling_bert.py", "modeling_flax_bert.py", "configuration_bert.py"}, ) def test_get_model_files(self): # BERT bert_files = get_model_files("bert") doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]} self.assertEqual(model_files, BERT_MODEL_FILES) self.assertEqual(bert_files["module_name"], "bert") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]} bert_test_files = { "tests/models/bert/test_tokenization_bert.py", "tests/models/bert/test_modeling_bert.py", "tests/models/bert/test_modeling_tf_bert.py", "tests/models/bert/test_modeling_flax_bert.py", } self.assertEqual(test_files, bert_test_files) # VIT vit_files = get_model_files("vit") doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]} self.assertEqual(model_files, VIT_MODEL_FILES) self.assertEqual(vit_files["module_name"], "vit") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]} vit_test_files = { "tests/models/vit/test_image_processing_vit.py", "tests/models/vit/test_modeling_vit.py", "tests/models/vit/test_modeling_tf_vit.py", "tests/models/vit/test_modeling_flax_vit.py", } self.assertEqual(test_files, vit_test_files) # Wav2Vec2 wav2vec2_files = get_model_files("wav2vec2") doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]} self.assertEqual(model_files, WAV2VEC2_MODEL_FILES) self.assertEqual(wav2vec2_files["module_name"], "wav2vec2") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]} wav2vec2_test_files = { "tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", "tests/models/wav2vec2/test_modeling_wav2vec2.py", "tests/models/wav2vec2/test_modeling_tf_wav2vec2.py", "tests/models/wav2vec2/test_modeling_flax_wav2vec2.py", "tests/models/wav2vec2/test_processor_wav2vec2.py", "tests/models/wav2vec2/test_tokenization_wav2vec2.py", } 
self.assertEqual(test_files, wav2vec2_test_files) def test_get_model_files_only_pt(self): # BERT bert_files = get_model_files("bert", frameworks=["pt"]) doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]} bert_model_files = BERT_MODEL_FILES - { "src/transformers/models/bert/modeling_tf_bert.py", "src/transformers/models/bert/modeling_flax_bert.py", } self.assertEqual(model_files, bert_model_files) self.assertEqual(bert_files["module_name"], "bert") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]} bert_test_files = { "tests/models/bert/test_tokenization_bert.py", "tests/models/bert/test_modeling_bert.py", } self.assertEqual(test_files, bert_test_files) # VIT vit_files = get_model_files("vit", frameworks=["pt"]) doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]} vit_model_files = VIT_MODEL_FILES - { "src/transformers/models/vit/modeling_tf_vit.py", "src/transformers/models/vit/modeling_flax_vit.py", } self.assertEqual(model_files, vit_model_files) self.assertEqual(vit_files["module_name"], "vit") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]} vit_test_files = { "tests/models/vit/test_image_processing_vit.py", "tests/models/vit/test_modeling_vit.py", } self.assertEqual(test_files, vit_test_files) # Wav2Vec2 wav2vec2_files = get_model_files("wav2vec2", frameworks=["pt"]) doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]} wav2vec2_model_files = WAV2VEC2_MODEL_FILES - { "src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py", "src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py", } self.assertEqual(model_files, wav2vec2_model_files) self.assertEqual(wav2vec2_files["module_name"], "wav2vec2") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]} wav2vec2_test_files = { "tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", "tests/models/wav2vec2/test_modeling_wav2vec2.py", "tests/models/wav2vec2/test_processor_wav2vec2.py", "tests/models/wav2vec2/test_tokenization_wav2vec2.py", } self.assertEqual(test_files, wav2vec2_test_files) def test_get_model_files_tf_and_flax(self): # BERT bert_files = get_model_files("bert", frameworks=["tf", "flax"]) doc_file = str(Path(bert_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["model_files"]} bert_model_files = BERT_MODEL_FILES - {"src/transformers/models/bert/modeling_bert.py"} self.assertEqual(model_files, bert_model_files) self.assertEqual(bert_files["module_name"], "bert") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files["test_files"]} bert_test_files = { "tests/models/bert/test_tokenization_bert.py", "tests/models/bert/test_modeling_tf_bert.py", "tests/models/bert/test_modeling_flax_bert.py", } self.assertEqual(test_files, bert_test_files) # VIT vit_files = get_model_files("vit", frameworks=["tf", "flax"]) doc_file = str(Path(vit_files["doc_file"]).relative_to(REPO_PATH)) 
self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["model_files"]} vit_model_files = VIT_MODEL_FILES - {"src/transformers/models/vit/modeling_vit.py"} self.assertEqual(model_files, vit_model_files) self.assertEqual(vit_files["module_name"], "vit") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files["test_files"]} vit_test_files = { "tests/models/vit/test_image_processing_vit.py", "tests/models/vit/test_modeling_tf_vit.py", "tests/models/vit/test_modeling_flax_vit.py", } self.assertEqual(test_files, vit_test_files) # Wav2Vec2 wav2vec2_files = get_model_files("wav2vec2", frameworks=["tf", "flax"]) doc_file = str(Path(wav2vec2_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md") model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["model_files"]} wav2vec2_model_files = WAV2VEC2_MODEL_FILES - {"src/transformers/models/wav2vec2/modeling_wav2vec2.py"} self.assertEqual(model_files, wav2vec2_model_files) self.assertEqual(wav2vec2_files["module_name"], "wav2vec2") test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files["test_files"]} wav2vec2_test_files = { "tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", "tests/models/wav2vec2/test_modeling_tf_wav2vec2.py", "tests/models/wav2vec2/test_modeling_flax_wav2vec2.py", "tests/models/wav2vec2/test_processor_wav2vec2.py", "tests/models/wav2vec2/test_tokenization_wav2vec2.py", } self.assertEqual(test_files, wav2vec2_test_files) def test_find_base_model_checkpoint(self): self.assertEqual(find_base_model_checkpoint("bert"), "bert-base-uncased") self.assertEqual(find_base_model_checkpoint("gpt2"), "gpt2") def test_retrieve_model_classes(self): gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2").items()} expected_gpt_classes = { "pt": {"GPT2ForTokenClassification", "GPT2Model", "GPT2LMHeadModel", "GPT2ForSequenceClassification"}, "tf": {"TFGPT2Model", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel"}, "flax": {"FlaxGPT2Model", "FlaxGPT2LMHeadModel"}, } self.assertEqual(gpt_classes, expected_gpt_classes) del expected_gpt_classes["flax"] gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2", frameworks=["pt", "tf"]).items()} self.assertEqual(gpt_classes, expected_gpt_classes) del expected_gpt_classes["pt"] gpt_classes = {k: set(v) for k, v in retrieve_model_classes("gpt2", frameworks=["tf"]).items()} self.assertEqual(gpt_classes, expected_gpt_classes) def test_retrieve_info_for_model_with_bert(self): bert_info = retrieve_info_for_model("bert") bert_classes = [ "BertForTokenClassification", "BertForQuestionAnswering", "BertForNextSentencePrediction", "BertForSequenceClassification", "BertForMaskedLM", "BertForMultipleChoice", "BertModel", "BertForPreTraining", "BertLMHeadModel", ] expected_model_classes = { "pt": set(bert_classes), "tf": {f"TF{m}" for m in bert_classes}, "flax": {f"Flax{m}" for m in bert_classes[:-1] + ["BertForCausalLM"]}, } self.assertEqual(set(bert_info["frameworks"]), {"pt", "tf", "flax"}) model_classes = {k: set(v) for k, v in bert_info["model_classes"].items()} self.assertEqual(model_classes, expected_model_classes) all_bert_files = bert_info["model_files"] model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["model_files"]} self.assertEqual(model_files, BERT_MODEL_FILES) test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["test_files"]} bert_test_files = { 
"tests/models/bert/test_tokenization_bert.py", "tests/models/bert/test_modeling_bert.py", "tests/models/bert/test_modeling_tf_bert.py", "tests/models/bert/test_modeling_flax_bert.py", } self.assertEqual(test_files, bert_test_files) doc_file = str(Path(all_bert_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md") self.assertEqual(all_bert_files["module_name"], "bert") bert_model_patterns = bert_info["model_patterns"] self.assertEqual(bert_model_patterns.model_name, "BERT") self.assertEqual(bert_model_patterns.checkpoint, "bert-base-uncased") self.assertEqual(bert_model_patterns.model_type, "bert") self.assertEqual(bert_model_patterns.model_lower_cased, "bert") self.assertEqual(bert_model_patterns.model_camel_cased, "Bert") self.assertEqual(bert_model_patterns.model_upper_cased, "BERT") self.assertEqual(bert_model_patterns.config_class, "BertConfig") self.assertEqual(bert_model_patterns.tokenizer_class, "BertTokenizer") self.assertIsNone(bert_model_patterns.feature_extractor_class) self.assertIsNone(bert_model_patterns.processor_class) def test_retrieve_info_for_model_pt_tf_with_bert(self): bert_info = retrieve_info_for_model("bert", frameworks=["pt", "tf"]) bert_classes = [ "BertForTokenClassification", "BertForQuestionAnswering", "BertForNextSentencePrediction", "BertForSequenceClassification", "BertForMaskedLM", "BertForMultipleChoice", "BertModel", "BertForPreTraining", "BertLMHeadModel", ] expected_model_classes = {"pt": set(bert_classes), "tf": {f"TF{m}" for m in bert_classes}} self.assertEqual(set(bert_info["frameworks"]), {"pt", "tf"}) model_classes = {k: set(v) for k, v in bert_info["model_classes"].items()} self.assertEqual(model_classes, expected_model_classes) all_bert_files = bert_info["model_files"] model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["model_files"]} bert_model_files = BERT_MODEL_FILES - {"src/transformers/models/bert/modeling_flax_bert.py"} self.assertEqual(model_files, bert_model_files) test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["test_files"]} bert_test_files = { "tests/models/bert/test_tokenization_bert.py", "tests/models/bert/test_modeling_bert.py", "tests/models/bert/test_modeling_tf_bert.py", } self.assertEqual(test_files, bert_test_files) doc_file = str(Path(all_bert_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md") self.assertEqual(all_bert_files["module_name"], "bert") bert_model_patterns = bert_info["model_patterns"] self.assertEqual(bert_model_patterns.model_name, "BERT") self.assertEqual(bert_model_patterns.checkpoint, "bert-base-uncased") self.assertEqual(bert_model_patterns.model_type, "bert") self.assertEqual(bert_model_patterns.model_lower_cased, "bert") self.assertEqual(bert_model_patterns.model_camel_cased, "Bert") self.assertEqual(bert_model_patterns.model_upper_cased, "BERT") self.assertEqual(bert_model_patterns.config_class, "BertConfig") self.assertEqual(bert_model_patterns.tokenizer_class, "BertTokenizer") self.assertIsNone(bert_model_patterns.feature_extractor_class) self.assertIsNone(bert_model_patterns.processor_class) def test_retrieve_info_for_model_with_vit(self): vit_info = retrieve_info_for_model("vit") vit_classes = ["ViTForImageClassification", "ViTModel"] pt_only_classes = ["ViTForMaskedImageModeling"] expected_model_classes = { "pt": set(vit_classes + pt_only_classes), "tf": {f"TF{m}" for m in vit_classes}, "flax": {f"Flax{m}" for m in vit_classes}, } 
self.assertEqual(set(vit_info["frameworks"]), {"pt", "tf", "flax"}) model_classes = {k: set(v) for k, v in vit_info["model_classes"].items()} self.assertEqual(model_classes, expected_model_classes) all_vit_files = vit_info["model_files"] model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_vit_files["model_files"]} self.assertEqual(model_files, VIT_MODEL_FILES) test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_vit_files["test_files"]} vit_test_files = { "tests/models/vit/test_image_processing_vit.py", "tests/models/vit/test_modeling_vit.py", "tests/models/vit/test_modeling_tf_vit.py", "tests/models/vit/test_modeling_flax_vit.py", } self.assertEqual(test_files, vit_test_files) doc_file = str(Path(all_vit_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/vit.md") self.assertEqual(all_vit_files["module_name"], "vit") vit_model_patterns = vit_info["model_patterns"] self.assertEqual(vit_model_patterns.model_name, "ViT") self.assertEqual(vit_model_patterns.checkpoint, "google/vit-base-patch16-224-in21k") self.assertEqual(vit_model_patterns.model_type, "vit") self.assertEqual(vit_model_patterns.model_lower_cased, "vit") self.assertEqual(vit_model_patterns.model_camel_cased, "ViT") self.assertEqual(vit_model_patterns.model_upper_cased, "VIT") self.assertEqual(vit_model_patterns.config_class, "ViTConfig") self.assertEqual(vit_model_patterns.feature_extractor_class, "ViTFeatureExtractor") self.assertEqual(vit_model_patterns.image_processor_class, "ViTImageProcessor") self.assertIsNone(vit_model_patterns.tokenizer_class) self.assertIsNone(vit_model_patterns.processor_class) def test_retrieve_info_for_model_with_wav2vec2(self): wav2vec2_info = retrieve_info_for_model("wav2vec2") wav2vec2_classes = [ "Wav2Vec2Model", "Wav2Vec2ForPreTraining", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", ] expected_model_classes = { "pt": set(wav2vec2_classes), "tf": {f"TF{m}" for m in wav2vec2_classes[:1]}, "flax": {f"Flax{m}" for m in wav2vec2_classes[:2]}, } self.assertEqual(set(wav2vec2_info["frameworks"]), {"pt", "tf", "flax"}) model_classes = {k: set(v) for k, v in wav2vec2_info["model_classes"].items()} self.assertEqual(model_classes, expected_model_classes) all_wav2vec2_files = wav2vec2_info["model_files"] model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_wav2vec2_files["model_files"]} self.assertEqual(model_files, WAV2VEC2_MODEL_FILES) test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_wav2vec2_files["test_files"]} wav2vec2_test_files = { "tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", "tests/models/wav2vec2/test_modeling_wav2vec2.py", "tests/models/wav2vec2/test_modeling_tf_wav2vec2.py", "tests/models/wav2vec2/test_modeling_flax_wav2vec2.py", "tests/models/wav2vec2/test_processor_wav2vec2.py", "tests/models/wav2vec2/test_tokenization_wav2vec2.py", } self.assertEqual(test_files, wav2vec2_test_files) doc_file = str(Path(all_wav2vec2_files["doc_file"]).relative_to(REPO_PATH)) self.assertEqual(doc_file, "docs/source/en/model_doc/wav2vec2.md") self.assertEqual(all_wav2vec2_files["module_name"], "wav2vec2") wav2vec2_model_patterns = wav2vec2_info["model_patterns"] self.assertEqual(wav2vec2_model_patterns.model_name, "Wav2Vec2") self.assertEqual(wav2vec2_model_patterns.checkpoint, "facebook/wav2vec2-base-960h") self.assertEqual(wav2vec2_model_patterns.model_type, "wav2vec2") 
self.assertEqual(wav2vec2_model_patterns.model_lower_cased, "wav2vec2") self.assertEqual(wav2vec2_model_patterns.model_camel_cased, "Wav2Vec2") self.assertEqual(wav2vec2_model_patterns.model_upper_cased, "WAV_2_VEC_2") self.assertEqual(wav2vec2_model_patterns.config_class, "Wav2Vec2Config") self.assertEqual(wav2vec2_model_patterns.feature_extractor_class, "Wav2Vec2FeatureExtractor") self.assertEqual(wav2vec2_model_patterns.processor_class, "Wav2Vec2Processor") self.assertEqual(wav2vec2_model_patterns.tokenizer_class, "Wav2Vec2CTCTokenizer") def test_clean_frameworks_in_init_with_gpt(self): test_init = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = { "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], "tokenization_gpt2": ["GPT2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt2"] = ["GPT2Model"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_gpt2"] = ["TFGPT2Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2Model"] if TYPE_CHECKING: from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig from .tokenization_gpt2 import GPT2Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt2_fast import GPT2TokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt2 import GPT2Model try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_gpt2 import TFGPT2Model try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt2 import FlaxGPT2Model else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_no_tokenizer = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = { "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt2"] = ["GPT2Model"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_gpt2"] = ["TFGPT2Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2Model"] if TYPE_CHECKING: from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt2 import GPT2Model try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_gpt2 import TFGPT2Model try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt2 import FlaxGPT2Model else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_pt_only = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], "tokenization_gpt2": ["GPT2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt2"] = ["GPT2Model"] if TYPE_CHECKING: from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig from .tokenization_gpt2 import GPT2Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt2_fast import GPT2TokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt2 import GPT2Model else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_pt_only_no_tokenizer = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_torch_available _import_structure = { "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt2"] = ["GPT2Model"] if TYPE_CHECKING: from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt2 import GPT2Model else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ with tempfile.TemporaryDirectory() as tmp_dir: file_name = os.path.join(tmp_dir, "../__init__.py") self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, keep_processing=False) self.check_result(file_name, init_no_tokenizer) self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, frameworks=["pt"]) self.check_result(file_name, init_pt_only) self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, frameworks=["pt"], keep_processing=False) self.check_result(file_name, init_pt_only_no_tokenizer) def test_clean_frameworks_in_init_with_vit(self): test_init = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available _import_structure = { "configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_vit"] = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vit"] = ["ViTModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_vit"] = ["TFViTModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_vit"] = ["FlaxViTModel"] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ViTModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_no_feature_extractor = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = { "configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vit"] = ["ViTModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_vit"] = ["TFViTModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_vit"] = ["FlaxViTModel"] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ViTModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_pt_only = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_vit"] = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vit"] = ["ViTModel"] if 
TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ViTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ init_pt_only_no_feature_extractor = """ from typing import TYPE_CHECKING from ...utils import _LazyModule, is_torch_available _import_structure = { "configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vit"] = ["ViTModel"] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ViTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) """ with tempfile.TemporaryDirectory() as tmp_dir: file_name = os.path.join(tmp_dir, "../__init__.py") self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, keep_processing=False) self.check_result(file_name, init_no_feature_extractor) self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, frameworks=["pt"]) self.check_result(file_name, init_pt_only) self.init_file(file_name, test_init) clean_frameworks_in_init(file_name, frameworks=["pt"], keep_processing=False) self.check_result(file_name, init_pt_only_no_feature_extractor) def test_duplicate_doc_file(self): test_doc = """ # GPT2 ## Overview Overview of the model. ## GPT2Config [[autodoc]] GPT2Config ## GPT2Tokenizer [[autodoc]] GPT2Tokenizer - save_vocabulary ## GPT2TokenizerFast [[autodoc]] GPT2TokenizerFast ## GPT2 specific outputs [[autodoc]] models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput [[autodoc]] models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput ## GPT2Model [[autodoc]] GPT2Model - forward ## TFGPT2Model [[autodoc]] TFGPT2Model - call ## FlaxGPT2Model [[autodoc]] FlaxGPT2Model - __call__ """ test_new_doc = """ # GPT-New New ## Overview The GPT-New New model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>. <INSERT SHORT SUMMARY HERE> The abstract from the paper is the following: *<INSERT PAPER ABSTRACT HERE>* Tips: <INSERT TIPS ABOUT MODEL HERE> This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). 
## GPTNewNewConfig [[autodoc]] GPTNewNewConfig ## GPTNewNewTokenizer [[autodoc]] GPTNewNewTokenizer - save_vocabulary ## GPTNewNewTokenizerFast [[autodoc]] GPTNewNewTokenizerFast ## GPTNewNew specific outputs [[autodoc]] models.gpt_new_new.modeling_gpt_new_new.GPTNewNewDoubleHeadsModelOutput [[autodoc]] models.gpt_new_new.modeling_tf_gpt_new_new.TFGPTNewNewDoubleHeadsModelOutput ## GPTNewNewModel [[autodoc]] GPTNewNewModel - forward ## TFGPTNewNewModel [[autodoc]] TFGPTNewNewModel - call ## FlaxGPTNewNewModel [[autodoc]] FlaxGPTNewNewModel - __call__ """ with tempfile.TemporaryDirectory() as tmp_dir: doc_file = os.path.join(tmp_dir, "gpt2.md") new_doc_file = os.path.join(tmp_dir, "gpt-new-new.md") gpt2_model_patterns = ModelPatterns("GPT2", "gpt2", tokenizer_class="GPT2Tokenizer") new_model_patterns = ModelPatterns( "GPT-New New", "huggingface/gpt-new-new", tokenizer_class="GPTNewNewTokenizer" ) self.init_file(doc_file, test_doc) duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns) self.check_result(new_doc_file, test_new_doc) test_new_doc_pt_only = test_new_doc.replace( """ ## TFGPTNewNewModel [[autodoc]] TFGPTNewNewModel - call ## FlaxGPTNewNewModel [[autodoc]] FlaxGPTNewNewModel - __call__ """, "", ) self.init_file(doc_file, test_doc) duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns, frameworks=["pt"]) self.check_result(new_doc_file, test_new_doc_pt_only) test_new_doc_no_tok = test_new_doc.replace( """ ## GPTNewNewTokenizer [[autodoc]] GPTNewNewTokenizer - save_vocabulary ## GPTNewNewTokenizerFast [[autodoc]] GPTNewNewTokenizerFast """, "", ) new_model_patterns = ModelPatterns( "GPT-New New", "huggingface/gpt-new-new", tokenizer_class="GPT2Tokenizer" ) self.init_file(doc_file, test_doc) duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns) print(test_new_doc_no_tok) self.check_result(new_doc_file, test_new_doc_no_tok) test_new_doc_pt_only_no_tok = test_new_doc_no_tok.replace( """ ## TFGPTNewNewModel [[autodoc]] TFGPTNewNewModel - call ## FlaxGPTNewNewModel [[autodoc]] FlaxGPTNewNewModel - __call__ """, "", ) self.init_file(doc_file, test_doc) duplicate_doc_file(doc_file, gpt2_model_patterns, new_model_patterns, frameworks=["pt"]) self.check_result(new_doc_file, test_new_doc_pt_only_no_tok)
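# --- Illustrative sketch (not part of the test file above) ---
# The tests above drive `duplicate_doc_file` through fixture strings; this is a minimal,
# hedged example of calling it directly. It assumes the helpers can be imported from
# `transformers.commands.add_new_model_like` (as in the transformers source tree) and that,
# as the test relies on, the duplicated doc is written next to the source doc under the new
# model type name. The doc content and model names are placeholders.
import tempfile
from pathlib import Path

from transformers.commands.add_new_model_like import ModelPatterns, duplicate_doc_file

old_patterns = ModelPatterns("GPT2", "gpt2", tokenizer_class="GPT2Tokenizer")
new_patterns = ModelPatterns("GPT-New New", "huggingface/gpt-new-new", tokenizer_class="GPTNewNewTokenizer")

with tempfile.TemporaryDirectory() as tmp_dir:
    doc_file = Path(tmp_dir) / "gpt2.md"
    doc_file.write_text("# GPT2\n\n## Overview\n\nOverview of the model.\n\n## GPT2Config\n\n[[autodoc]] GPT2Config\n")
    # Keep only the PyTorch sections in the duplicated doc, as in the frameworks=["pt"] case above.
    duplicate_doc_file(str(doc_file), old_patterns, new_patterns, frameworks=["pt"])
    print((Path(tmp_dir) / "gpt-new-new.md").read_text())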
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_image_processing_utils.py
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.image_processing_utils import get_size_dict


class ImageProcessingUtilsTester(unittest.TestCase):
    def test_get_size_dict(self):
        # Test a dict with the wrong keys raises an error
        inputs = {"wrong_key": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        inputs = {"height": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        inputs = {"width": 224, "shortest_edge": 224}
        with self.assertRaises(ValueError):
            get_size_dict(inputs)

        # Test a dict with the correct keys is returned as is
        inputs = {"height": 224, "width": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, inputs)

        inputs = {"shortest_edge": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, {"shortest_edge": 224})

        inputs = {"longest_edge": 224, "shortest_edge": 224}
        outputs = get_size_dict(inputs)
        self.assertEqual(outputs, {"longest_edge": 224, "shortest_edge": 224})

        # Test a single int value which represents (size, size)
        outputs = get_size_dict(224)
        self.assertEqual(outputs, {"height": 224, "width": 224})

        # Test a single int value which represents the shortest edge
        outputs = get_size_dict(224, default_to_square=False)
        self.assertEqual(outputs, {"shortest_edge": 224})

        # Test a tuple of ints which represents (height, width)
        outputs = get_size_dict((150, 200))
        self.assertEqual(outputs, {"height": 150, "width": 200})

        # Test a tuple of ints which represents (width, height)
        outputs = get_size_dict((150, 200), height_width_order=False)
        self.assertEqual(outputs, {"height": 200, "width": 150})

        # Test an int representing the shortest edge and max_size which represents the longest edge
        outputs = get_size_dict(224, max_size=256, default_to_square=False)
        self.assertEqual(outputs, {"shortest_edge": 224, "longest_edge": 256})

        # Test int with default_to_square=True and max_size fails
        with self.assertRaises(ValueError):
            get_size_dict(224, max_size=256, default_to_square=True)
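# --- Illustrative sketch (not part of the test file above) ---
# `get_size_dict` normalizes the many accepted forms of a `size` argument into an explicit
# dict; the expected outputs noted below are exactly those asserted in the test above.
from transformers.image_processing_utils import get_size_dict

print(get_size_dict(224))                                          # {"height": 224, "width": 224}
print(get_size_dict(224, default_to_square=False))                 # {"shortest_edge": 224}
print(get_size_dict((150, 200)))                                   # {"height": 150, "width": 200}
print(get_size_dict((150, 200), height_width_order=False))         # {"height": 200, "width": 150}
print(get_size_dict(224, max_size=256, default_to_square=False))   # {"shortest_edge": 224, "longest_edge": 256}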
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_convert_slow_tokenizer.py
import unittest
import warnings
from dataclasses import dataclass

from transformers.convert_slow_tokenizer import SpmConverter
from transformers.testing_utils import get_tests_dir


@dataclass
class FakeOriginalTokenizer:
    vocab_file: str


class ConvertSlowTokenizerTest(unittest.TestCase):
    def test_spm_converter_bytefallback_warning(self):
        spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model")
        spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")

        original_tokenizer_without_bytefallback = FakeOriginalTokenizer(
            vocab_file=spm_model_file_without_bytefallback
        )

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_without_bytefallback)
        self.assertEqual(len(w), 0)

        original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback)

        with warnings.catch_warnings(record=True) as w:
            _ = SpmConverter(original_tokenizer_with_bytefallback)
        self.assertEqual(len(w), 1)

        self.assertIn(
            "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
            " which is not implemented in the fast tokenizers.",
            str(w[0].message),
        )
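# --- Illustrative sketch (not part of the test file above) ---
# Minimal example of surfacing the byte-fallback warning when converting a slow sentencepiece
# tokenizer, mirroring the test above. The .model path is a placeholder for any sentencepiece
# file trained with byte_fallback enabled.
import warnings
from dataclasses import dataclass

from transformers.convert_slow_tokenizer import SpmConverter


@dataclass
class DummySlowTokenizer:
    vocab_file: str


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    SpmConverter(DummySlowTokenizer(vocab_file="path/to/spm_with_bytefallback.model"))
for warning in caught:
    print(warning.message)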
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_audio_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from transformers.audio_utils import ( amplitude_to_db, hertz_to_mel, mel_filter_bank, mel_to_hertz, power_to_db, spectrogram, window_function, ) class AudioUtilsFunctionTester(unittest.TestCase): def test_hertz_to_mel(self): self.assertEqual(hertz_to_mel(0.0), 0.0) self.assertAlmostEqual(hertz_to_mel(100), 150.48910241) inputs = np.array([100, 200]) expected = np.array([150.48910241, 283.22989816]) self.assertTrue(np.allclose(hertz_to_mel(inputs), expected)) self.assertEqual(hertz_to_mel(0.0, "slaney"), 0.0) self.assertEqual(hertz_to_mel(100, "slaney"), 1.5) inputs = np.array([60, 100, 200, 1000, 1001, 2000]) expected = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016]) self.assertTrue(np.allclose(hertz_to_mel(inputs, "slaney"), expected)) inputs = np.array([60, 100, 200, 1000, 1001, 2000]) expected = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674]) self.assertTrue(np.allclose(hertz_to_mel(inputs, "kaldi"), expected)) with pytest.raises(ValueError): hertz_to_mel(100, mel_scale=None) def test_mel_to_hertz(self): self.assertEqual(mel_to_hertz(0.0), 0.0) self.assertAlmostEqual(mel_to_hertz(150.48910241), 100) inputs = np.array([150.48910241, 283.22989816]) expected = np.array([100, 200]) self.assertTrue(np.allclose(mel_to_hertz(inputs), expected)) self.assertEqual(mel_to_hertz(0.0, "slaney"), 0.0) self.assertEqual(mel_to_hertz(1.5, "slaney"), 100) inputs = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016]) expected = np.array([60, 100, 200, 1000, 1001, 2000]) self.assertTrue(np.allclose(mel_to_hertz(inputs, "slaney"), expected)) inputs = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674]) expected = np.array([60, 100, 200, 1000, 1001, 2000]) self.assertTrue(np.allclose(mel_to_hertz(inputs, "kaldi"), expected)) with pytest.raises(ValueError): mel_to_hertz(100, mel_scale=None) def test_mel_filter_bank_shape(self): mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm=None, mel_scale="htk", ) self.assertEqual(mel_filters.shape, (513, 13)) mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm="slaney", mel_scale="slaney", ) self.assertEqual(mel_filters.shape, (513, 13)) mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm="slaney", mel_scale="slaney", triangularize_in_mel_space=True, ) self.assertEqual(mel_filters.shape, (513, 13)) def test_mel_filter_bank_htk(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="htk", ) # fmt: off expected = np.array([ [0.0 , 0.0 , 0.0 , 0.0 ], [0.61454786, 0.0 , 0.0 , 0.0 ], [0.82511046, 0.17488954, 0.0 , 0.0 ], 
[0.35597035, 0.64402965, 0.0 , 0.0 ], [0.0 , 0.91360726, 0.08639274, 0.0 ], [0.0 , 0.55547007, 0.44452993, 0.0 ], [0.0 , 0.19733289, 0.80266711, 0.0 ], [0.0 , 0.0 , 0.87724349, 0.12275651], [0.0 , 0.0 , 0.6038449 , 0.3961551 ], [0.0 , 0.0 , 0.33044631, 0.66955369], [0.0 , 0.0 , 0.05704771, 0.94295229], [0.0 , 0.0 , 0.0 , 0.83483975], [0.0 , 0.0 , 0.0 , 0.62612982], [0.0 , 0.0 , 0.0 , 0.41741988], [0.0 , 0.0 , 0.0 , 0.20870994], [0.0 , 0.0 , 0.0 , 0.0 ] ]) # fmt: on self.assertTrue(np.allclose(mel_filters, expected)) def test_mel_filter_bank_slaney(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="slaney", ) # fmt: off expected = np.array([ [0.0 , 0.0 , 0.0 , 0.0 ], [0.39869419, 0.0 , 0.0 , 0.0 ], [0.79738839, 0.0 , 0.0 , 0.0 ], [0.80391742, 0.19608258, 0.0 , 0.0 ], [0.40522322, 0.59477678, 0.0 , 0.0 ], [0.00652903, 0.99347097, 0.0 , 0.0 ], [0.0 , 0.60796161, 0.39203839, 0.0 ], [0.0 , 0.20939631, 0.79060369, 0.0 ], [0.0 , 0.0 , 0.84685344, 0.15314656], [0.0 , 0.0 , 0.52418477, 0.47581523], [0.0 , 0.0 , 0.2015161 , 0.7984839 ], [0.0 , 0.0 , 0.0 , 0.9141874 ], [0.0 , 0.0 , 0.0 , 0.68564055], [0.0 , 0.0 , 0.0 , 0.4570937 ], [0.0 , 0.0 , 0.0 , 0.22854685], [0.0 , 0.0 , 0.0 , 0.0 ] ]) # fmt: on self.assertTrue(np.allclose(mel_filters, expected)) def test_mel_filter_bank_kaldi(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) # fmt: off expected = np.array( [[0.0000, 0.0000, 0.0000, 0.0000], [0.6086, 0.0000, 0.0000, 0.0000], [0.8689, 0.1311, 0.0000, 0.0000], [0.4110, 0.5890, 0.0000, 0.0000], [0.0036, 0.9964, 0.0000, 0.0000], [0.0000, 0.6366, 0.3634, 0.0000], [0.0000, 0.3027, 0.6973, 0.0000], [0.0000, 0.0000, 0.9964, 0.0036], [0.0000, 0.0000, 0.7135, 0.2865], [0.0000, 0.0000, 0.4507, 0.5493], [0.0000, 0.0000, 0.2053, 0.7947], [0.0000, 0.0000, 0.0000, 0.9752], [0.0000, 0.0000, 0.0000, 0.7585], [0.0000, 0.0000, 0.0000, 0.5539], [0.0000, 0.0000, 0.0000, 0.3599], [0.0000, 0.0000, 0.0000, 0.1756]] ) # fmt: on self.assertTrue(np.allclose(mel_filters, expected, atol=5e-5)) def test_mel_filter_bank_slaney_norm(self): mel_filters = mel_filter_bank( num_frequency_bins=16, num_mel_filters=4, min_frequency=0, max_frequency=2000, sampling_rate=4000, norm="slaney", mel_scale="slaney", ) # fmt: off expected = np.array([ [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [1.19217795e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [2.38435591e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], [2.40387905e-03, 5.86232616e-04, 0.00000000e+00, 0.00000000e+00], [1.21170110e-03, 1.77821783e-03, 0.00000000e+00, 0.00000000e+00], [1.95231437e-05, 2.97020305e-03, 0.00000000e+00, 0.00000000e+00], [0.00000000e+00, 1.81763684e-03, 1.04857612e-03, 0.00000000e+00], [0.00000000e+00, 6.26036972e-04, 2.11460963e-03, 0.00000000e+00], [0.00000000e+00, 0.00000000e+00, 2.26505954e-03, 3.07332945e-04], [0.00000000e+00, 0.00000000e+00, 1.40202503e-03, 9.54861093e-04], [0.00000000e+00, 0.00000000e+00, 5.38990521e-04, 1.60238924e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.83458185e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37593638e-03], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 9.17290923e-04], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.58645462e-04], [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] 
]) # fmt: on self.assertTrue(np.allclose(mel_filters, expected)) def test_window_function(self): window = window_function(16, "hann") self.assertEqual(len(window), 16) # fmt: off expected = np.array([ 0.0, 0.03806023, 0.14644661, 0.30865828, 0.5, 0.69134172, 0.85355339, 0.96193977, 1.0, 0.96193977, 0.85355339, 0.69134172, 0.5, 0.30865828, 0.14644661, 0.03806023, ]) # fmt: on self.assertTrue(np.allclose(window, expected)) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_spectrogram_impulse(self): waveform = np.zeros(40) waveform[9] = 1.0 # impulse shifted in time spec = spectrogram( waveform, window_function(12, "hann", frame_length=16), frame_length=16, hop_length=4, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (9, 11)) expected = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) self.assertTrue(np.allclose(spec, expected)) def test_spectrogram_integration_test(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) # fmt: off expected = np.array([ 0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 , 0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357, 0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792, 0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558, 0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598, 0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042, 0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293, 0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 , 0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666, 0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069, 0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424, 0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637, 0.0293578 , 0.03452379, 0.02194803, 0.01676056, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 400], expected)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, fft_length=512, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) self.assertTrue(np.allclose(spec[:64, 400], expected)) mel_filters = mel_filter_bank( num_frequency_bins=256, num_mel_filters=400, min_frequency=20, max_frequency=8000, sampling_rate=16000, norm=None, mel_scale="kaldi", triangularize_in_mel_space=True, ) mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) spec = spectrogram( waveform, window_function(400, "povey", periodic=False), frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, pad_mode="reflect", onesided=True, preemphasis=0.97, mel_filters=mel_filters, log_mel="log", mel_floor=1.1920928955078125e-07, remove_dc_offset=True, ) self.assertEqual(spec.shape, (400, 584)) # fmt: off expected = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -6.52463769, -7.73677889, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.18650018, -3.37195286, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.70190154, -2.4217066 , -15.94238515, -15.94238515, 
-15.94238515, -15.94238515, -5.62755239, -3.53385194, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -9.43303023, -8.77480925, -15.94238515, -15.94238515, -15.94238515, -15.94238515, -4.2951092 , -5.51585994, -15.94238515, -15.94238515, -15.94238515, -4.40151721, -3.95228878, -15.94238515, -15.94238515, -15.94238515, -6.10365415, -4.59494697, -15.94238515, -15.94238515, -15.94238515, -8.10727767, -6.2585298 , -15.94238515, -15.94238515, -15.94238515, -5.60161702, -4.47217004, -15.94238515, -15.94238515, -15.94238515, -5.91641988] ) # fmt: on self.assertTrue(np.allclose(spec[:64, 400], expected, atol=1e-5)) def test_spectrogram_center_padding(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="reflect", ) self.assertEqual(spec.shape, (257, 732)) # fmt: off expected = np.array([ 0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202, 0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668, 0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998, 0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725, 0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529, 0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339, 0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734, 0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963, 0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949, 0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404, 0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862, 0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 , 0.00217659, 0.00276204, 0.00260835, 0.00299299, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=True, pad_mode="constant", ) self.assertEqual(spec.shape, (257, 732)) # fmt: off expected = np.array([ 0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115, 0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055, 0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367, 0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621, 0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947, 0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912, 0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984, 0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813, 0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781, 0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 , 0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322, 0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599, 0.00788239, 0.00664407, 0.00824227, 0.00628301, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 0], expected)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=128, center=False, ) self.assertEqual(spec.shape, (257, 728)) # fmt: off expected = np.array([ 0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727, 0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 , 0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623, 0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692, 0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 , 0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883, 0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801, 0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778, 0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908, 0.03648811, 0.0202938 , 0.011902 , 0.03226198, 
0.06347476, 0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512, 0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876, 0.00811857, 0.00538216, 0.00685749, 0.00535275, ]) # fmt: on self.assertTrue(np.allclose(spec[:64, 0], expected)) def test_spectrogram_shapes(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (201, 732)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, power=1.0, center=False, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (201, 729)) spec = spectrogram( waveform, window_function(400, "hann"), frame_length=400, hop_length=128, fft_length=512, power=1.0, center=True, pad_mode="reflect", onesided=True, ) self.assertEqual(spec.shape, (257, 732)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=64, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 1464)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=64, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 1464)) spec = spectrogram( waveform, window_function(512, "hann"), frame_length=512, hop_length=512, power=1.0, center=True, pad_mode="reflect", onesided=False, ) self.assertEqual(spec.shape, (512, 183)) def test_mel_spectrogram(self): waveform = self._load_datasamples(1)[0] mel_filters = mel_filter_bank( num_frequency_bins=513, num_mel_filters=13, min_frequency=100, max_frequency=4000, sampling_rate=16000, norm=None, mel_scale="htk", ) self.assertEqual(mel_filters.shape, (513, 13)) spec = spectrogram( waveform, window_function(800, "hann", frame_length=1024), frame_length=1024, hop_length=128, power=2.0, ) self.assertEqual(spec.shape, (513, 732)) spec = spectrogram( waveform, window_function(800, "hann", frame_length=1024), frame_length=1024, hop_length=128, power=2.0, mel_filters=mel_filters, ) self.assertEqual(spec.shape, (13, 732)) # fmt: off expected = np.array([ 1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01, 8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03, 7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03, 9.44153646e-04 ]) # fmt: on self.assertTrue(np.allclose(spec[:, 300], expected)) def test_spectrogram_power(self): waveform = self._load_datasamples(1)[0] spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=None, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.complex64) # fmt: off expected = np.array([ 0.01452305+0.01820039j, -0.01737362-0.01641946j, 0.0121028 +0.01565081j, -0.02794554-0.03021514j, 0.04719803+0.04086519j, -0.04391563-0.02779365j, 0.05682834+0.01571325j, -0.08604821-0.02023657j, 0.07497991+0.0186641j , -0.06366091-0.00922475j, 0.11003416+0.0114788j , -0.13677941-0.01523552j, 0.10934535-0.00117226j, -0.11635598+0.02551187j, 0.14708674-0.03469823j, -0.1328196 +0.06034218j, 0.12667368-0.13973421j, -0.14764774+0.18912019j, 0.10235471-0.12181523j, -0.00773012+0.04730498j, -0.01487191-0.07312611j, -0.02739162+0.09619419j, 0.02895459-0.05398273j, 0.01198589+0.05276592j, -0.02117299-0.10123465j, 0.00666388+0.09526499j, -0.01672773-0.05649684j, 0.02723125+0.05939891j, -0.01879361-0.062954j , 0.03686557+0.04568823j, 
-0.07394181-0.07949649j, 0.06238583+0.13905765j, ]) # fmt: on self.assertTrue(np.allclose(spec[64:96, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=1.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) # fmt: off expected = np.array([ 0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 , 0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579, 0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405, 0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241, 0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509, 0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678, 0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171, 0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861, 0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 , 0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615, 0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328, 0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876, 0.12322842, 0.1621659 , 0.12334293, 0.06033659, ]) # fmt: on self.assertTrue(np.allclose(spec[64:128, 321], expected)) spec = spectrogram( waveform, window_function(400, "hann", frame_length=512), frame_length=512, hop_length=128, power=2.0, ) self.assertEqual(spec.shape, (257, 732)) self.assertEqual(spec.dtype, np.float64) # fmt: off expected = np.array([ 5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03, 3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03, 5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02, 1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02, 3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03, 5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 2.92790355e-03, 1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03, 4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02, 1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02, 1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03, 6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05, 1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04, 2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04, 2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02, 5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02, 1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03, ]) # fmt: on self.assertTrue(np.allclose(spec[64:128, 321], expected)) def test_power_to_db(self): spectrogram = np.zeros((2, 3)) spectrogram[0, 0] = 2.0 spectrogram[0, 1] = 0.5 spectrogram[0, 2] = 0.707 spectrogram[1, 1] = 1.0 output = power_to_db(spectrogram, reference=1.0) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-100.0, 0.0, -100.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0) expected = np.array([[0.0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, min_value=1e-6) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-60.0, 0.0, -60.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, db_range=80) expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0.0, -76.98970004]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], 
[-80.0, -3.01029996, -80.0]]) self.assertTrue(np.allclose(output, expected)) output = power_to_db(spectrogram, reference=2.0, min_value=1e-6, db_range=80) expected = np.array([[0.0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]]) self.assertTrue(np.allclose(output, expected)) with pytest.raises(ValueError): power_to_db(spectrogram, reference=0.0) with pytest.raises(ValueError): power_to_db(spectrogram, min_value=0.0) with pytest.raises(ValueError): power_to_db(spectrogram, db_range=-80) def test_amplitude_to_db(self): spectrogram = np.zeros((2, 3)) spectrogram[0, 0] = 2.0 spectrogram[0, 1] = 0.5 spectrogram[0, 2] = 0.707 spectrogram[1, 1] = 1.0 output = amplitude_to_db(spectrogram, reference=1.0) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-100.0, 0.0, -100.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0) expected = np.array([[0.0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, min_value=1e-3) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-60.0, 0.0, -60.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, db_range=80) expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0.0, -73.97940009]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0, db_range=80) expected = np.array([[0.0, -12.04119983, -9.03221164], [-80.0, -6.02059991, -80.0]]) self.assertTrue(np.allclose(output, expected)) output = amplitude_to_db(spectrogram, reference=2.0, min_value=1e-3, db_range=80) expected = np.array([[0.0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]]) self.assertTrue(np.allclose(output, expected)) with pytest.raises(ValueError): amplitude_to_db(spectrogram, reference=0.0) with pytest.raises(ValueError): amplitude_to_db(spectrogram, min_value=0.0) with pytest.raises(ValueError): amplitude_to_db(spectrogram, db_range=-80)
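# --- Illustrative sketch (not part of the test file above) ---
# A hedged end-to-end use of the utilities tested here: build a mel filter bank, apply it
# inside `spectrogram`, and convert the result to decibels. Only functions and parameters
# that appear in the tests above are used; a synthetic sine wave stands in for real speech.
import numpy as np

from transformers.audio_utils import mel_filter_bank, power_to_db, spectrogram, window_function

sampling_rate = 16000
waveform = np.sin(2 * np.pi * 440.0 * np.arange(sampling_rate) / sampling_rate)

mel_filters = mel_filter_bank(
    num_frequency_bins=257,  # fft_length // 2 + 1 for frame_length=512, onesided output
    num_mel_filters=80,
    min_frequency=20,
    max_frequency=8000,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)
mel_spec = spectrogram(
    waveform,
    window_function(400, "hann", frame_length=512),
    frame_length=512,
    hop_length=128,
    power=2.0,
    mel_filters=mel_filters,
)
log_mel = power_to_db(mel_spec, db_range=80)
print(log_mel.shape)  # (80, number_of_frames)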
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_versions_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import sys from transformers.testing_utils import TestCasePlus from transformers.utils.versions import require_version, require_version_core numpy_ver = importlib.metadata.version("numpy") python_ver = ".".join([str(x) for x in sys.version_info[:3]]) class DependencyVersionCheckTest(TestCasePlus): def test_core(self): # lt + different version strings require_version_core("numpy<1000.4.5") require_version_core("numpy<1000.4") require_version_core("numpy<1000") # le require_version_core("numpy<=1000.4.5") require_version_core(f"numpy<={numpy_ver}") # eq require_version_core(f"numpy=={numpy_ver}") # ne require_version_core("numpy!=1000.4.5") # ge require_version_core("numpy>=1.0") require_version_core("numpy>=1.0.0") require_version_core(f"numpy>={numpy_ver}") # gt require_version_core("numpy>1.0.0") # mix require_version_core("numpy>1.0.0,<1000") # requirement w/o version require_version_core("numpy") # unmet requirements due to version conflict for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn("but found", str(e)) # unmet requirements due to missing module for req in ["numpipypie>1", "numpipypie2"]: try: require_version_core(req) except importlib.metadata.PackageNotFoundError as e: self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e)) self.assertIn("Try: pip install transformers -U", str(e)) # bogus requirements formats: # 1. whole thing for req in ["numpy??1.0.0", "numpy1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("requirement needs to be in the pip package format", str(e)) # 2. only operators for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("need one of ", str(e)) def test_python(self): # matching requirement require_version("python>=3.6.0") # not matching requirements for req in ["python>9.9.9", "python<3.0.0"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn(f"but found python=={python_ver}", str(e))
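# --- Illustrative sketch (not part of the test file above) ---
# Typical runtime use of the version helpers exercised above; the package names and version
# ranges are arbitrary examples.
from transformers.utils.versions import require_version, require_version_core

require_version("numpy>=1.0", "numpy>=1.0 is needed for this example script")
try:
    require_version_core("numpy>=1000.0.0")  # deliberately unsatisfiable, as in the test above
except ImportError as err:
    print(f"version check failed: {err}")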
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_logging.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class HfArgumentParserTest(unittest.TestCase): def test_set_level(self): logger = logging.get_logger() # the current default level is logging.WARNING level_origin = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) # restore to the original level logging.set_verbosity(level_origin) def test_integration(self): level_origin = logging.get_verbosity() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, "") # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # restore to the original level logging.set_verbosity(level_origin) @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _ = logging.get_logger("transformers.models.bart.tokenization_bart") env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) env_level = logging.log_levels[env_level_str] current_level = logging.get_verbosity() self.assertEqual( env_level, current_level, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", ) # restore to the original level os.environ["TRANSFORMERS_VERBOSITY"] = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error") def test_env_invalid_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() logger = logging.logging.getLogger() with CaptureLogger(logger) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart") 
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out) # no need to restore as nothing was changed def test_advisory_warnings(self): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"): # nothing should be logged as env var disables this method with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, "") with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, msg + "\n") def test_set_progress_bar_enabled(): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
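# --- Illustrative sketch (not part of the test file above) ---
# Everyday use of the verbosity and progress-bar controls tested here: silence the
# `transformers.*` loggers for a noisy block, then restore the previous level.
from transformers.utils import logging
from transformers.utils.logging import disable_progress_bar, enable_progress_bar

previous_level = logging.get_verbosity()
logging.set_verbosity_error()  # only errors from transformers loggers
logger = logging.get_logger("transformers")
logger.warning("this warning is suppressed")
logging.set_verbosity(previous_level)  # restore the original level

disable_progress_bar()  # hide download/processing progress bars
enable_progress_bar()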
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_modeling_tf_core.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import os import tempfile from importlib import import_module from math import isnan from transformers import is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import _tf_gpu_memory_limit, require_tf, slow from ..test_modeling_tf_common import ids_tensor if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, ) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) @require_tf class TFCoreModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), 
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict @slow def test_graph_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function(experimental_compile=True) def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_fit(self): # This is a copy of the test_keras_fit method, but we use XLA compilation instead of eager config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Is there a better way to remove these decoder inputs? prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids") } possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # Make sure it works with XLA! model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True) # Make sure the model fits without crashing regardless of where we pass the labels history = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) # Now test it with separate labels, to make sure that path works in XLA too. 
model = model_class(config) model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True) history = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model.build() num_out = len(model(class_inputs_dict)) for key in list(class_inputs_dict.keys()): # Remove keys not in the serving signature, as the SavedModel will not be compiled to deal with them if key not in model.input_signature: del class_inputs_dict[key] # Check it's a tensor, in case the inputs dict has some bools in it too elif isinstance(class_inputs_dict[key], tf.Tensor) and class_inputs_dict[key].dtype.is_integer: class_inputs_dict[key] = tf.cast(class_inputs_dict[key], tf.int32) if set(class_inputs_dict.keys()) != set(model.input_signature.keys()): continue # Some models have inputs that the preparation functions don't create, we skip those with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) if self.is_encoder_decoder: output_hidden_states = outputs["encoder_hidden_states"] output_attentions = outputs["encoder_attentions"] else: output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_mixed_precision(self): tf.keras.mixed_precision.set_global_policy("mixed_float16") # try/finally block to ensure subsequent tests run in float32 try: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) outputs = model(class_inputs_dict) self.assertIsNotNone(outputs) finally: tf.keras.mixed_precision.set_global_policy("float32") @slow def test_train_pipeline_custom_model(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # head_mask and decoder_head_mask has different shapes than other input args if "head_mask" in inputs_dict: del inputs_dict["head_mask"] if "decoder_head_mask" in inputs_dict: del 
inputs_dict["decoder_head_mask"] if "cross_attn_head_mask" in inputs_dict: del inputs_dict["cross_attn_head_mask"] tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and tf.keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared") config.use_cache = False main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } if hasattr(self.model_tester, "num_labels"): num_labels = self.model_tester.num_labels else: num_labels = 2 X = tf.data.Dataset.from_tensor_slices( (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1))) ).batch(1) hidden_states = main_layer(symbolic_inputs)[0] outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states) model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs]) model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"]) model.fit(X, epochs=1) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = tf.keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = tf.keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, tf.keras.Model) model(inputs_dict) @slow def test_graph_mode_with_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: 
special_tokens.append(model.config.eos_token_id) # create random bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_dynamic_module_utils.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False

    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_cli.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import unittest
from unittest.mock import patch

from transformers.testing_utils import CaptureStd, is_pt_tf_cross_test, require_torch


class CLITest(unittest.TestCase):
    @patch("sys.argv", ["fakeprogrampath", "env"])
    def test_cli_env(self):
        # test transformers-cli env
        import transformers.commands.transformers_cli

        with CaptureStd() as cs:
            transformers.commands.transformers_cli.main()
        self.assertIn("Python version", cs.out)
        self.assertIn("Platform", cs.out)
        self.assertIn("Using distributed or parallel set-up in script?", cs.out)

    @is_pt_tf_cross_test
    @patch(
        "sys.argv", ["fakeprogrampath", "pt-to-tf", "--model-name", "hf-internal-testing/tiny-random-gptj", "--no-pr"]
    )
    def test_cli_pt_to_tf(self):
        import transformers.commands.transformers_cli

        shutil.rmtree("/tmp/hf-internal-testing/tiny-random-gptj", ignore_errors=True)  # cleans potential past runs
        transformers.commands.transformers_cli.main()

        self.assertTrue(os.path.exists("/tmp/hf-internal-testing/tiny-random-gptj/tf_model.h5"))

    @require_torch
    @patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
    def test_cli_download(self):
        import transformers.commands.transformers_cli

        # remove any previously downloaded model to start clean
        shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)

        # run the command
        transformers.commands.transformers_cli.main()

        # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))

    @require_torch
    @patch(
        "sys.argv",
        [
            "fakeprogrampath",
            "download",
            "hf-internal-testing/test_dynamic_model_with_tokenizer",
            "--trust-remote-code",
            "--cache-dir",
            "/tmp",
        ],
    )
    def test_cli_download_trust_remote(self):
        import transformers.commands.transformers_cli

        # remove any previously downloaded model to start clean
        shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)

        # run the command
        transformers.commands.transformers_cli.main()

        # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
        self.assertTrue(
            os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
        )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_model_card.py
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile
import unittest

from transformers.modelcard import ModelCard


class ModelCardTester(unittest.TestCase):
    def setUp(self):
        self.inputs_dict = {
            "model_details": {
                "Organization": "testing",
                "Model date": "today",
                "Model version": "v2.1, Developed by Test Corp in 2019.",
                "Architecture": "Convolutional Neural Network.",
            },
            "metrics": "BLEU and ROUGE-1",
            "evaluation_data": {
                "Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"},
                "Preprocessing": "See details on https://arxiv.org/pdf/1810.03993.pdf",
            },
            "training_data": {
                "Dataset": "English Wikipedia dump dated 2018-12-01",
                "Preprocessing": (
                    "Using SentencePiece vocabulary of size 52k tokens. See details on"
                    " https://arxiv.org/pdf/1810.03993.pdf"
                ),
            },
            "quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76},
        }

    def test_model_card_common_properties(self):
        modelcard = ModelCard.from_dict(self.inputs_dict)
        self.assertTrue(hasattr(modelcard, "model_details"))
        self.assertTrue(hasattr(modelcard, "intended_use"))
        self.assertTrue(hasattr(modelcard, "factors"))
        self.assertTrue(hasattr(modelcard, "metrics"))
        self.assertTrue(hasattr(modelcard, "evaluation_data"))
        self.assertTrue(hasattr(modelcard, "training_data"))
        self.assertTrue(hasattr(modelcard, "quantitative_analyses"))
        self.assertTrue(hasattr(modelcard, "ethical_considerations"))
        self.assertTrue(hasattr(modelcard, "caveats_and_recommendations"))

    def test_model_card_to_json_string(self):
        modelcard = ModelCard.from_dict(self.inputs_dict)
        obj = json.loads(modelcard.to_json_string())
        for key, value in self.inputs_dict.items():
            self.assertEqual(obj[key], value)

    def test_model_card_to_json_file(self):
        model_card_first = ModelCard.from_dict(self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.join(tmpdirname, "modelcard.json")
            model_card_first.to_json_file(filename)
            model_card_second = ModelCard.from_json_file(filename)

        self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())

    def test_model_card_from_and_save_pretrained(self):
        model_card_first = ModelCard.from_dict(self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model_card_first.save_pretrained(tmpdirname)
            model_card_second = ModelCard.from_pretrained(tmpdirname)

        self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_doc_samples.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, looking for the files identified with `identifier`. Executes the doctests
        in those files.

        Args:
            directory (`Path`): Directory containing the files
            identifier (`str`): Will parse files containing this
            ignore_files (`List[str]`): List of files to skip
            n_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers.
            only_modules (`bool`): Whether to only analyze modules
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_image_utils.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import datasets import numpy as np import pytest from huggingface_hub.file_download import http_get from requests import ConnectTimeout, ReadTimeout from tests.pipelines.test_pipelines_document_question_answering import INVOICE_URL from transformers import is_torch_available, is_vision_available from transformers.image_utils import ChannelDimension, get_channel_dimension_axis, make_list_of_images from transformers.testing_utils import is_flaky, require_torch, require_vision if is_torch_available(): import torch if is_vision_available(): import PIL.Image from transformers import ImageFeatureExtractionMixin from transformers.image_utils import get_image_size, infer_channel_dimension_format, load_image def get_random_image(height, width): random_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8) return PIL.Image.fromarray(random_array) @require_vision class ImageFeatureExtractionTester(unittest.TestCase): def test_conversion_image_to_array(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) # Conversion with defaults (rescale + channel first) array1 = feature_extractor.to_numpy_array(image) self.assertTrue(array1.dtype, np.float32) self.assertEqual(array1.shape, (3, 16, 32)) # Conversion with rescale and not channel first array2 = feature_extractor.to_numpy_array(image, channel_first=False) self.assertTrue(array2.dtype, np.float32) self.assertEqual(array2.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array1, array2.transpose(2, 0, 1))) # Conversion with no rescale and channel first array3 = feature_extractor.to_numpy_array(image, rescale=False) self.assertTrue(array3.dtype, np.uint8) self.assertEqual(array3.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array1, array3.astype(np.float32) * (1 / 255.0))) # Conversion with no rescale and not channel first array4 = feature_extractor.to_numpy_array(image, rescale=False, channel_first=False) self.assertTrue(array4.dtype, np.uint8) self.assertEqual(array4.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array2, array4.astype(np.float32) * (1 / 255.0))) def test_conversion_array_to_array(self): feature_extractor = ImageFeatureExtractionMixin() array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8) # By default, rescale (for an array of ints) and channel permute array1 = feature_extractor.to_numpy_array(array) self.assertTrue(array1.dtype, np.float32) self.assertEqual(array1.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))) # Same with no permute array2 = feature_extractor.to_numpy_array(array, channel_first=False) self.assertTrue(array2.dtype, np.float32) self.assertEqual(array2.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0))) # Force rescale to False array3 = feature_extractor.to_numpy_array(array, rescale=False) 
self.assertTrue(array3.dtype, np.uint8) self.assertEqual(array3.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1))) # Force rescale to False and no channel permute array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False) self.assertTrue(array4.dtype, np.uint8) self.assertEqual(array4.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array4, array)) # Now test the default rescale for a float array (defaults to False) array5 = feature_extractor.to_numpy_array(array2) self.assertTrue(array5.dtype, np.float32) self.assertEqual(array5.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array5, array1)) def test_make_list_of_images_numpy(self): # Test a single image is converted to a list of 1 image images = np.random.randint(0, 256, (16, 32, 3)) images_list = make_list_of_images(images) self.assertEqual(len(images_list), 1) self.assertTrue(np.array_equal(images_list[0], images)) self.assertIsInstance(images_list, list) # Test a batch of images is converted to a list of images images = np.random.randint(0, 256, (4, 16, 32, 3)) images_list = make_list_of_images(images) self.assertEqual(len(images_list), 4) self.assertTrue(np.array_equal(images_list[0], images[0])) self.assertIsInstance(images_list, list) # Test a list of images is not modified images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)] images_list = make_list_of_images(images) self.assertEqual(len(images_list), 4) self.assertTrue(np.array_equal(images_list[0], images[0])) self.assertIsInstance(images_list, list) # Test batched masks with no channel dimension are converted to a list of masks masks = np.random.randint(0, 2, (4, 16, 32)) masks_list = make_list_of_images(masks, expected_ndims=2) self.assertEqual(len(masks_list), 4) self.assertTrue(np.array_equal(masks_list[0], masks[0])) self.assertIsInstance(masks_list, list) @require_torch def test_make_list_of_images_torch(self): # Test a single image is converted to a list of 1 image images = torch.randint(0, 256, (16, 32, 3)) images_list = make_list_of_images(images) self.assertEqual(len(images_list), 1) self.assertTrue(np.array_equal(images_list[0], images)) self.assertIsInstance(images_list, list) # Test a batch of images is converted to a list of images images = torch.randint(0, 256, (4, 16, 32, 3)) images_list = make_list_of_images(images) self.assertEqual(len(images_list), 4) self.assertTrue(np.array_equal(images_list[0], images[0])) self.assertIsInstance(images_list, list) # Test a list of images is left unchanged images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)] images_list = make_list_of_images(images) self.assertEqual(len(images_list), 4) self.assertTrue(np.array_equal(images_list[0], images[0])) self.assertIsInstance(images_list, list) @require_torch def test_conversion_torch_to_array(self): feature_extractor = ImageFeatureExtractionMixin() tensor = torch.randint(0, 256, (16, 32, 3)) array = tensor.numpy() # By default, rescale (for a tensor of ints) and channel permute array1 = feature_extractor.to_numpy_array(array) self.assertTrue(array1.dtype, np.float32) self.assertEqual(array1.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))) # Same with no permute array2 = feature_extractor.to_numpy_array(array, channel_first=False) self.assertTrue(array2.dtype, np.float32) self.assertEqual(array2.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0))) # Force rescale to False array3 = 
feature_extractor.to_numpy_array(array, rescale=False) self.assertTrue(array3.dtype, np.uint8) self.assertEqual(array3.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1))) # Force rescale to False and no channel permute array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False) self.assertTrue(array4.dtype, np.uint8) self.assertEqual(array4.shape, (16, 32, 3)) self.assertTrue(np.array_equal(array4, array)) # Now test the default rescale for a float tensor (defaults to False) array5 = feature_extractor.to_numpy_array(array2) self.assertTrue(array5.dtype, np.float32) self.assertEqual(array5.shape, (3, 16, 32)) self.assertTrue(np.array_equal(array5, array1)) def test_conversion_image_to_image(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) # On an image, `to_pil_image1` is a noop. image1 = feature_extractor.to_pil_image(image) self.assertTrue(isinstance(image, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image), np.array(image1))) def test_conversion_array_to_image(self): feature_extractor = ImageFeatureExtractionMixin() array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8) # By default, no rescale (for an array of ints) image1 = feature_extractor.to_pil_image(array) self.assertTrue(isinstance(image1, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image1), array)) # If the array is channel-first, proper reordering of the channels is done. image2 = feature_extractor.to_pil_image(array.transpose(2, 0, 1)) self.assertTrue(isinstance(image2, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image2), array)) # If the array has floating type, it's rescaled by default. image3 = feature_extractor.to_pil_image(array.astype(np.float32) * (1 / 255.0)) self.assertTrue(isinstance(image3, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image3), array)) # You can override the default to rescale. image4 = feature_extractor.to_pil_image(array.astype(np.float32), rescale=False) self.assertTrue(isinstance(image4, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image4), array)) # And with floats + channel first. image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)) self.assertTrue(isinstance(image5, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image5), array)) @require_torch def test_conversion_tensor_to_image(self): feature_extractor = ImageFeatureExtractionMixin() tensor = torch.randint(0, 256, (16, 32, 3)) array = tensor.numpy() # By default, no rescale (for a tensor of ints) image1 = feature_extractor.to_pil_image(tensor) self.assertTrue(isinstance(image1, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image1), array)) # If the tensor is channel-first, proper reordering of the channels is done. image2 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1)) self.assertTrue(isinstance(image2, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image2), array)) # If the tensor has floating type, it's rescaled by default. image3 = feature_extractor.to_pil_image(tensor.float() / 255.0) self.assertTrue(isinstance(image3, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image3), array)) # You can override the default to rescale. image4 = feature_extractor.to_pil_image(tensor.float(), rescale=False) self.assertTrue(isinstance(image4, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image4), array)) # And with floats + channel first. 
image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() * (1 / 255.0)) self.assertTrue(isinstance(image5, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image5), array)) def test_resize_image_and_array(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) array = np.array(image) # Size can be an int or a tuple of ints. resized_image = feature_extractor.resize(image, 8) self.assertTrue(isinstance(resized_image, PIL.Image.Image)) self.assertEqual(resized_image.size, (8, 8)) resized_image1 = feature_extractor.resize(image, (8, 16)) self.assertTrue(isinstance(resized_image1, PIL.Image.Image)) self.assertEqual(resized_image1.size, (8, 16)) # Passing an array converts it to a PIL Image. resized_image2 = feature_extractor.resize(array, 8) self.assertTrue(isinstance(resized_image2, PIL.Image.Image)) self.assertEqual(resized_image2.size, (8, 8)) self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2))) resized_image3 = feature_extractor.resize(image, (8, 16)) self.assertTrue(isinstance(resized_image3, PIL.Image.Image)) self.assertEqual(resized_image3.size, (8, 16)) self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3))) def test_resize_image_and_array_non_default_to_square(self): feature_extractor = ImageFeatureExtractionMixin() heights_widths = [ # height, width # square image (28, 28), (27, 27), # rectangular image: h < w (28, 34), (29, 35), # rectangular image: h > w (34, 28), (35, 29), ] # single integer or single integer in tuple/list sizes = [22, 27, 28, 36, [22], (27,)] for (height, width), size in zip(heights_widths, sizes): for max_size in (None, 37, 1000): image = get_random_image(height, width) array = np.array(image) size = size[0] if isinstance(size, (list, tuple)) else size # Size can be an int or a tuple of ints. # If size is an int, smaller edge of the image will be matched to this number. # i.e, if height > width, then image will be rescaled to (size * height / width, size). if height < width: exp_w, exp_h = (int(size * width / height), size) if max_size is not None and max_size < exp_w: exp_w, exp_h = max_size, int(max_size * exp_h / exp_w) elif width < height: exp_w, exp_h = (size, int(size * height / width)) if max_size is not None and max_size < exp_h: exp_w, exp_h = int(max_size * exp_w / exp_h), max_size else: exp_w, exp_h = (size, size) if max_size is not None and max_size < size: exp_w, exp_h = max_size, max_size resized_image = feature_extractor.resize(image, size=size, default_to_square=False, max_size=max_size) self.assertTrue(isinstance(resized_image, PIL.Image.Image)) self.assertEqual(resized_image.size, (exp_w, exp_h)) # Passing an array converts it to a PIL Image. resized_image2 = feature_extractor.resize(array, size=size, default_to_square=False, max_size=max_size) self.assertTrue(isinstance(resized_image2, PIL.Image.Image)) self.assertEqual(resized_image2.size, (exp_w, exp_h)) self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2))) @require_torch def test_resize_tensor(self): feature_extractor = ImageFeatureExtractionMixin() tensor = torch.randint(0, 256, (16, 32, 3)) array = tensor.numpy() # Size can be an int or a tuple of ints. 
resized_image = feature_extractor.resize(tensor, 8) self.assertTrue(isinstance(resized_image, PIL.Image.Image)) self.assertEqual(resized_image.size, (8, 8)) resized_image1 = feature_extractor.resize(tensor, (8, 16)) self.assertTrue(isinstance(resized_image1, PIL.Image.Image)) self.assertEqual(resized_image1.size, (8, 16)) # Check we get the same results as with NumPy arrays. resized_image2 = feature_extractor.resize(array, 8) self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2))) resized_image3 = feature_extractor.resize(array, (8, 16)) self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3))) def test_normalize_image(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) array = np.array(image) mean = [0.1, 0.5, 0.9] std = [0.2, 0.4, 0.6] # PIL Image are converted to NumPy arrays for the normalization normalized_image = feature_extractor.normalize(image, mean, std) self.assertTrue(isinstance(normalized_image, np.ndarray)) self.assertEqual(normalized_image.shape, (3, 16, 32)) # During the conversion rescale and channel first will be applied. expected = array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0) np_mean = np.array(mean).astype(np.float32)[:, None, None] np_std = np.array(std).astype(np.float32)[:, None, None] expected = (expected - np_mean) / np_std self.assertTrue(np.array_equal(normalized_image, expected)) def test_normalize_array(self): feature_extractor = ImageFeatureExtractionMixin() array = np.random.random((16, 32, 3)) mean = [0.1, 0.5, 0.9] std = [0.2, 0.4, 0.6] # mean and std can be passed as lists or NumPy arrays. expected = (array - np.array(mean)) / np.array(std) normalized_array = feature_extractor.normalize(array, mean, std) self.assertTrue(np.array_equal(normalized_array, expected)) normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std)) self.assertTrue(np.array_equal(normalized_array, expected)) # Normalize will detect automatically if channel first or channel last is used. array = np.random.random((3, 16, 32)) expected = (array - np.array(mean)[:, None, None]) / np.array(std)[:, None, None] normalized_array = feature_extractor.normalize(array, mean, std) self.assertTrue(np.array_equal(normalized_array, expected)) normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std)) self.assertTrue(np.array_equal(normalized_array, expected)) @require_torch def test_normalize_tensor(self): feature_extractor = ImageFeatureExtractionMixin() tensor = torch.rand(16, 32, 3) mean = [0.1, 0.5, 0.9] std = [0.2, 0.4, 0.6] # mean and std can be passed as lists or tensors. expected = (tensor - torch.tensor(mean)) / torch.tensor(std) normalized_tensor = feature_extractor.normalize(tensor, mean, std) self.assertTrue(torch.equal(normalized_tensor, expected)) normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std)) self.assertTrue(torch.equal(normalized_tensor, expected)) # Normalize will detect automatically if channel first or channel last is used. 
tensor = torch.rand(3, 16, 32) expected = (tensor - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None] normalized_tensor = feature_extractor.normalize(tensor, mean, std) self.assertTrue(torch.equal(normalized_tensor, expected)) normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std)) self.assertTrue(torch.equal(normalized_tensor, expected)) def test_center_crop_image(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions. crop_sizes = [8, (8, 64), 20, (32, 64)] for size in crop_sizes: cropped_image = feature_extractor.center_crop(image, size) self.assertTrue(isinstance(cropped_image, PIL.Image.Image)) # PIL Image.size is transposed compared to NumPy or PyTorch (width first instead of height first). expected_size = (size, size) if isinstance(size, int) else (size[1], size[0]) self.assertEqual(cropped_image.size, expected_size) def test_center_crop_array(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) array = feature_extractor.to_numpy_array(image) # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions. crop_sizes = [8, (8, 64), 20, (32, 64)] for size in crop_sizes: cropped_array = feature_extractor.center_crop(array, size) self.assertTrue(isinstance(cropped_array, np.ndarray)) expected_size = (size, size) if isinstance(size, int) else size self.assertEqual(cropped_array.shape[-2:], expected_size) # Check result is consistent with PIL.Image.crop cropped_image = feature_extractor.center_crop(image, size) self.assertTrue(np.array_equal(cropped_array, feature_extractor.to_numpy_array(cropped_image))) @require_torch def test_center_crop_tensor(self): feature_extractor = ImageFeatureExtractionMixin() image = get_random_image(16, 32) array = feature_extractor.to_numpy_array(image) tensor = torch.tensor(array) # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions. 
crop_sizes = [8, (8, 64), 20, (32, 64)] for size in crop_sizes: cropped_tensor = feature_extractor.center_crop(tensor, size) self.assertTrue(isinstance(cropped_tensor, torch.Tensor)) expected_size = (size, size) if isinstance(size, int) else size self.assertEqual(cropped_tensor.shape[-2:], expected_size) # Check result is consistent with PIL.Image.crop cropped_image = feature_extractor.center_crop(image, size) self.assertTrue(torch.equal(cropped_tensor, torch.tensor(feature_extractor.to_numpy_array(cropped_image)))) @require_vision class LoadImageTester(unittest.TestCase): def test_load_img_url(self): img = load_image(INVOICE_URL) img_arr = np.array(img) self.assertEqual(img_arr.shape, (1061, 750, 3)) @is_flaky() def test_load_img_url_timeout(self): with self.assertRaises((ReadTimeout, ConnectTimeout)): load_image(INVOICE_URL, timeout=0.001) def test_load_img_local(self): img = load_image("./tests/fixtures/tests_samples/COCO/000000039769.png") img_arr = np.array(img) self.assertEqual( img_arr.shape, (480, 640, 3), ) def test_load_img_base64_prefix(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_0.txt", f ) with open(tmp_file, encoding="utf-8") as b64: img = load_image(b64.read()) img_arr = np.array(img) finally: os.remove(tmp_file) self.assertEqual(img_arr.shape, (64, 32, 3)) def test_load_img_base64(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_1.txt", f ) with open(tmp_file, encoding="utf-8") as b64: img = load_image(b64.read()) img_arr = np.array(img) finally: os.remove(tmp_file) self.assertEqual(img_arr.shape, (64, 32, 3)) def test_load_img_rgba(self): dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") img = load_image(dataset[0]["file"]) # img with mode RGBA img_arr = np.array(img) self.assertEqual( img_arr.shape, (512, 512, 3), ) def test_load_img_la(self): dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") img = load_image(dataset[1]["file"]) # img with mode LA img_arr = np.array(img) self.assertEqual( img_arr.shape, (512, 768, 3), ) def test_load_img_l(self): dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") img = load_image(dataset[2]["file"]) # img with mode L img_arr = np.array(img) self.assertEqual( img_arr.shape, (381, 225, 3), ) def test_load_img_exif_transpose(self): dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") img_file = dataset[3]["file"] img_without_exif_transpose = PIL.Image.open(img_file) img_arr_without_exif_transpose = np.array(img_without_exif_transpose) self.assertEqual( img_arr_without_exif_transpose.shape, (333, 500, 3), ) img_with_exif_transpose = load_image(img_file) img_arr_with_exif_transpose = np.array(img_with_exif_transpose) self.assertEqual( img_arr_with_exif_transpose.shape, (500, 333, 3), ) class UtilFunctionTester(unittest.TestCase): def test_get_image_size(self): # Test we can infer the size and channel dimension of an image. 
image = np.random.randint(0, 256, (32, 64, 3)) self.assertEqual(get_image_size(image), (32, 64)) image = np.random.randint(0, 256, (3, 32, 64)) self.assertEqual(get_image_size(image), (32, 64)) # Test the channel dimension can be overriden image = np.random.randint(0, 256, (3, 32, 64)) self.assertEqual(get_image_size(image, channel_dim=ChannelDimension.LAST), (3, 32)) def test_infer_channel_dimension(self): # Test we fail with invalid input with pytest.raises(ValueError): infer_channel_dimension_format(np.random.randint(0, 256, (10, 10))) with pytest.raises(ValueError): infer_channel_dimension_format(np.random.randint(0, 256, (10, 10, 10, 10, 10))) # Test we fail if neither first not last dimension is of size 3 or 1 with pytest.raises(ValueError): infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50))) # But if we explicitly set one of the number of channels to 50 it works inferred_dim = infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)), num_channels=50) self.assertEqual(inferred_dim, ChannelDimension.LAST) # Test we correctly identify the channel dimension image = np.random.randint(0, 256, (3, 4, 5)) inferred_dim = infer_channel_dimension_format(image) self.assertEqual(inferred_dim, ChannelDimension.FIRST) image = np.random.randint(0, 256, (1, 4, 5)) inferred_dim = infer_channel_dimension_format(image) self.assertEqual(inferred_dim, ChannelDimension.FIRST) image = np.random.randint(0, 256, (4, 5, 3)) inferred_dim = infer_channel_dimension_format(image) self.assertEqual(inferred_dim, ChannelDimension.LAST) image = np.random.randint(0, 256, (4, 5, 1)) inferred_dim = infer_channel_dimension_format(image) self.assertEqual(inferred_dim, ChannelDimension.LAST) # We can take a batched array of images and find the dimension image = np.random.randint(0, 256, (1, 3, 4, 5)) inferred_dim = infer_channel_dimension_format(image) self.assertEqual(inferred_dim, ChannelDimension.FIRST) def test_get_channel_dimension_axis(self): # Test we correctly identify the channel dimension image = np.random.randint(0, 256, (3, 4, 5)) inferred_axis = get_channel_dimension_axis(image) self.assertEqual(inferred_axis, 0) image = np.random.randint(0, 256, (1, 4, 5)) inferred_axis = get_channel_dimension_axis(image) self.assertEqual(inferred_axis, 0) image = np.random.randint(0, 256, (4, 5, 3)) inferred_axis = get_channel_dimension_axis(image) self.assertEqual(inferred_axis, 2) image = np.random.randint(0, 256, (4, 5, 1)) inferred_axis = get_channel_dimension_axis(image) self.assertEqual(inferred_axis, 2) # We can take a batched array of images and find the dimension image = np.random.randint(0, 256, (1, 3, 4, 5)) inferred_axis = get_channel_dimension_axis(image) self.assertEqual(inferred_axis, 1)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_skip_decorators.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# this test validates that we can stack skip decorators in groups and whether
# they work correctly with other decorators
#
# since the decorators have already built their decision params (like checking
# env[], we can't mock the env and test each of the combinations), so ideally
# the following 4 should be run. But since we have different CI jobs running
# different configs, all combinations should get covered
#
# RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
# RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py

import os
import unittest

import pytest
from parameterized import parameterized

from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device


# skipping in unittest tests

params = [(1,)]


# test that we can stack our skip decorators with 3rd party decorators
def check_slow():
    run_slow = bool(os.getenv("RUN_SLOW", 0))
    if run_slow:
        assert True
    else:
        assert False, "should have been skipped"


# test that we can stack our skip decorators
def check_slow_torch_cuda():
    run_slow = bool(os.getenv("RUN_SLOW", 0))
    if run_slow and torch_device == "cuda":
        assert True
    else:
        assert False, "should have been skipped"


@require_torch
class SkipTester(unittest.TestCase):
    @slow
    @require_torch_gpu
    def test_2_skips_slow_first(self):
        check_slow_torch_cuda()

    @require_torch_gpu
    @slow
    def test_2_skips_slow_last(self):
        check_slow_torch_cuda()

    # The combination of any skip decorator, followed by parameterized fails to skip the tests
    # 1. @slow manages to correctly skip `test_param_slow_first`
    # 2. but then `parameterized` creates new tests, with a unique name for each parameter groups.
    #    It has no idea that they are to be skipped and so they all run, ignoring @slow
    # Therefore skip decorators must come after `parameterized`
    #
    # @slow
    # @parameterized.expand(params)
    # def test_param_slow_first(self, param=None):
    #     check_slow()

    # This works as expected:
    # 1. `parameterized` creates new tests with unique names
    # 2. each of them gets an opportunity to be skipped
    @parameterized.expand(params)
    @slow
    def test_param_slow_last(self, param=None):
        check_slow()


# skipping in non-unittest tests
# no problem at all here


@slow
@require_torch_gpu
def test_pytest_2_skips_slow_first():
    check_slow_torch_cuda()


@require_torch_gpu
@slow
def test_pytest_2_skips_slow_last():
    check_slow_torch_cuda()


@slow
@pytest.mark.parametrize("param", [1])
def test_pytest_param_slow_first(param):
    check_slow()


@pytest.mark.parametrize("param", [1])
@slow
def test_pytest_param_slow_last(param):
    check_slow()
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/utils/test_generic.py
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/README.md
# Testing new Hugging Face Deep Learning Container.

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly on Mondays. The code cutoff date for any changes is the Wednesday before the release Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate for the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC-tag.

```
git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install main or adjust it with vX.X.X to install a specific transformers version
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably take a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`.

To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`:
```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
        datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe which tests we ran and with which package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see which information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably take a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**
1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x-x-x.yml`, where `x-x-x` is the base framework version, e.g. if PyTorch 1.6.0 is the latest version in `buildspec.yaml`, the file should be renamed to `buildspec-1-6.yml`. To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`:

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
    training_repository: &TRAINING_REPOSITORY
        image_type: &TRAINING_IMAGE_TYPE training
        root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
        repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
        repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
    BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
        <<: *TRAINING_REPOSITORY
        build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
        image_size_baseline: &IMAGE_SIZE_BASELINE 15000
        device_type: &DEVICE_TYPE gpu
        python_version: &DOCKER_PYTHON_VERSION py3
        tag_python_version: &TAG_PYTHON_VERSION py36
        cuda_version: &CUDA_VERSION cu110
        os_version: &OS_VERSION ubuntu18.04
        transformers_version: &TRANSFORMERS_VERSION 4.4.2
        datasets_version: &DATASETS_VERSION 1.5.0
        tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
        docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment, describe which tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see what information is needed.
## Current Tests

| ID                                  | Description                                                                 | Platform                    | # GPUs | Collected & evaluated metrics             |
|-------------------------------------|-----------------------------------------------------------------------------|-----------------------------|--------|-------------------------------------------|
| pytorch-transformers-test-single    | test BERT finetuning using BERT from the transformers lib + PT              | SageMaker createTrainingJob | 1      | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transformers-test-2-ddp     | test BERT finetuning using BERT from the transformers lib + PT DDP          | SageMaker createTrainingJob | 16     | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transformers-test-2-smd     | test BERT finetuning using BERT from the transformers lib + PT SM DDP       | SageMaker createTrainingJob | 16     | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transformers-test-1-smp     | test RoBERTa finetuning using RoBERTa from the transformers lib + PT SM MP  | SageMaker createTrainingJob | 8      | train_runtime, eval_accuracy & eval_loss  |
| tensorflow-transformers-test-single | test BERT finetuning using BERT from the transformers lib + TF              | SageMaker createTrainingJob | 1      | train_runtime, eval_accuracy & eval_loss  |
| tensorflow-transformers-test-2-smd  | test BERT finetuning using BERT from the transformers lib + TF SM DDP       | SageMaker createTrainingJob | 16     | train_runtime, eval_accuracy & eval_loss  |
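For a quicker iteration loop you can also invoke a single test module directly instead of the full suite. A minimal sketch, assuming the Makefile target simply wraps pytest (the `TEST_SAGEMAKER` flag is what the skip markers in `tests/sagemaker` check; the exact Makefile internals are not shown here):

```bash
# Run only the single-node GPU tests; TEST_SAGEMAKER=True disables the skipif guard.
TEST_SAGEMAKER=True AWS_PROFILE=<enter-your-profile> python -m pytest -s -v ./tests/sagemaker/test_single_node_gpu.py
```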
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/conftest.py
# we define a fixture function below and it will be "used" by
# referencing its name from tests

import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # default region for the SageMaker jobs


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # the TF scripts log Keras metric names, so accuracy and loss need different regexes
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
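For orientation, a minimal sketch of how the test classes consume this fixture (the class below is illustrative, not one of the real tests; in the real suite `parameterized_class` injects the `framework` attribute):

```python
import unittest

import pytest


@pytest.mark.usefixtures("sm_env")  # attaches a SageMakerTestEnvironment as `self.env`
class ExampleSageMakerTest(unittest.TestCase):
    framework = "pytorch"  # read by the fixture via request.cls.framework

    def test_env_is_attached(self):
        # Every test in the class can read the shared environment from `self.env`.
        assert self.env.base_job_name == "pytorch-transformers-test"
        assert self.env.test_path == "./tests/sagemaker/scripts/pytorch"
```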
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_data_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
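The `distribution` argument is the only moving part that separates the job variants above. Condensed from the test for quick reference (all other estimator arguments stay identical):

```python
# SageMaker Distributed Data Parallel, used for the run_glue.py and run_tf_dist.py jobs:
smd_distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}

# Plain PyTorch DDP: run_ddp.py calls torch.distributed.launch itself,
# so no SageMaker-managed distribution is passed to the estimator.
ddp_distribution = None
```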
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_model_parallel.py
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    # @parameterized.expand([(2,), (4,),])
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/__init__.py
import importlib def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_single_node_gpu.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class SingleNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count=1): # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def test_glue(self): # create estimator estimator = self.create_estimator() # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf.py
import argparse
import logging
import sys
import time

import tensorflow as tf
from datasets import load_dataset

from transformers import AutoTokenizer, TFAutoModelForSequenceClassification


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Hyperparameters sent by the client are passed as command-line arguments to the script.
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--per_device_train_batch_size", type=int, default=16)
    parser.add_argument("--per_device_eval_batch_size", type=int, default=8)
    parser.add_argument("--model_name_or_path", type=str)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--do_train", type=bool, default=True)
    parser.add_argument("--do_eval", type=bool, default=True)
    parser.add_argument("--output_dir", type=str)

    args, _ = parser.parse_known_args()

    # overwrite batch size until we have tf_glue.py
    args.per_device_train_batch_size = 16
    args.per_device_eval_batch_size = 16

    # Set up logging
    logger = logging.getLogger(__name__)

    logging.basicConfig(
        level=logging.getLevelName("INFO"),
        handlers=[logging.StreamHandler(sys.stdout)],
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    # Load model and tokenizer
    model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

    # Load dataset
    train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])
    train_dataset = train_dataset.shuffle().select(range(5000))  # shrink the train dataset to 5k samples
    test_dataset = test_dataset.shuffle().select(range(500))  # shrink the test dataset to 500 samples

    # Preprocess train dataset
    train_dataset = train_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    train_features = {
        x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])).batch(
        args.per_device_train_batch_size
    )

    # Preprocess test dataset
    test_dataset = test_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    test_features = {
        x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])).batch(
        args.per_device_eval_batch_size
    )

    # define optimizer and loss
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # Training
    start_train_time = time.time()
    train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.per_device_train_batch_size)
    end_train_time = time.time() - start_train_time

    logger.info("*** Train ***")
    logger.info(f"train_runtime = {end_train_time}")
    for key, value in train_results.history.items():
        logger.info(f"    {key} = {value}")
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py
import argparse
import logging
import os
import sys
import time

import tensorflow as tf
from datasets import load_dataset
from tqdm import tqdm

from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
from transformers.utils import is_sagemaker_dp_enabled


if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled():
    SDP_ENABLED = True
    os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge"
    import smdistributed.dataparallel.tensorflow as sdp
else:
    SDP_ENABLED = False


def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None):
    pbar = tqdm(train_dataset)
    for i, batch in enumerate(pbar):
        with tf.GradientTape() as tape:
            inputs, targets = batch
            outputs = model(batch)
            loss_value = loss(targets, outputs.logits)

        if SDP_ENABLED:
            tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True)

        grads = tape.gradient(loss_value, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))

        pbar.set_description(f"Loss: {loss_value:.4f}")

        if SDP_ENABLED and i == 0:
            sdp.broadcast_variables(model.variables, root_rank=0)
            sdp.broadcast_variables(opt.variables(), root_rank=0)

        if max_steps and i >= max_steps:
            break

    train_results = {"loss": loss_value.numpy()}
    return train_results


def get_datasets(tokenizer, train_batch_size, eval_batch_size):
    # Load dataset
    train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])

    # Preprocess train dataset
    train_dataset = train_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    train_features = {
        x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"]))

    # Preprocess test dataset
    test_dataset = test_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    test_features = {
        x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"]))

    if SDP_ENABLED:
        tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank())
        tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank())
    tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True)
    tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True)

    return tf_train_dataset, tf_test_dataset


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Hyperparameters sent by the client are passed as command-line arguments to the script.
    parser.add_argument("--epochs", type=int, default=3)
    parser.add_argument("--per_device_train_batch_size", type=int, default=16)
    parser.add_argument("--per_device_eval_batch_size", type=int, default=8)
    parser.add_argument("--model_name_or_path", type=str)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--do_train", type=bool, default=True)
    parser.add_argument("--do_eval", type=bool, default=True)
    parser.add_argument("--output_dir", type=str)
    parser.add_argument("--max_steps", type=int, default=None)

    # Data, model, and output directories
    parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
    parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
    parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])

    args, _ = parser.parse_known_args()

    # Set up logging
    logger = logging.getLogger(__name__)

    logging.basicConfig(
        level=logging.getLevelName("INFO"),
        handlers=[logging.StreamHandler(sys.stdout)],
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    if SDP_ENABLED:
        sdp.init()

        gpus = tf.config.experimental.list_physical_devices("GPU")
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        if gpus:
            tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU")

    # Load model and tokenizer
    model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

    # get datasets
    tf_train_dataset, tf_test_dataset = get_datasets(
        tokenizer=tokenizer,
        train_batch_size=args.per_device_train_batch_size,
        eval_batch_size=args.per_device_eval_batch_size,
    )

    # define optimizer and loss
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # Training
    if args.do_train:
        # train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size)
        start_train_time = time.time()
        train_results = fit(
            model,
            loss,
            optimizer,
            tf_train_dataset,
            args.epochs,
            args.per_device_train_batch_size,
            max_steps=args.max_steps,
        )
        end_train_time = time.time() - start_train_time

        logger.info("*** Train ***")
        logger.info(f"train_runtime = {end_train_time}")

        output_eval_file = os.path.join(args.output_dir, "train_results.txt")

        if not SDP_ENABLED or sdp.rank() == 0:
            with open(output_eval_file, "w") as writer:
                logger.info("***** Train results *****")
                logger.info(train_results)
                for key, value in train_results.items():
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    # Evaluation
    if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0):
        result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True)
        logger.info("*** Evaluate ***")

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            logger.info(result)
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

    # Save result
    if SDP_ENABLED:
        if sdp.rank() == 0:
            model.save_pretrained(args.output_dir)
            tokenizer.save_pretrained(args.output_dir)
    else:
        model.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a specific transformers version
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( # Trainer,; TrainingArguments, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) # Will import SageMaker Model parallelism specific Trainer from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if training_args.should_log else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if training_args.should_log: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset("glue", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files datasets = load_dataset("csv", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. 
# Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_ddp.py
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" try: subprocess.run(cmd, shell=True) except Exception as e: logger.info(e) if __name__ == "__main__": main()
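For illustration, on a two-node cluster with eight GPUs per host and a single forwarded hyperparameter, the command assembled on rank 0 comes out roughly as follows (host names are placeholders; the actual values come from the `SM_*` environment variables):

```bash
python -m torch.distributed.launch \
    --nnodes=2 \
    --node_rank=0 \
    --nproc_per_node=8 \
    --master_addr=algo-1 \
    --master_port=8888 \
    ./run_glue.py \
     --model_name_or_path bert-base-cased
```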
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a specific transformers version
datasets==1.8.0
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/gptq/test_gptq.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and ww don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.assertEqual(self.quantized_model.config.quantization_config.use_exllama, False) # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.use_exllama, True) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). 
More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" * 500 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). 
More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fail when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
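

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the test suite, guarded so it never runs
# on import): how a GPTQConfig like the ones exercised above is typically used
# to quantize a model at load time. The checkpoint name and output directory
# are illustrative assumptions only; quantization needs a CUDA GPU plus the
# optimum and auto-gptq packages.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

    model_id = "facebook/opt-125m"  # hypothetical small model, for illustration only
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # `dataset` provides the calibration samples; a named dataset such as "c4"
    # or a list of strings is accepted.
    gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)

    # Quantization happens while loading the fp16 weights.
    quantized = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", quantization_config=gptq_config
    )

    # The quantized weights can be serialized and reloaded like any checkpoint.
    quantized.save_pretrained("opt-125m-gptq")  # hypothetical output directory
    reloaded = AutoModelForCausalLM.from_pretrained("opt-125m-gptq", device_map="auto")
    print(reloaded.config.quantization_config)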
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/bnb/README.md
# Testing mixed int8 quantization

![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1660567705337-62441d1d9fdefb55a0b7d12c.png)

The following is a recipe for effectively debugging the `bitsandbytes` integration in Hugging Face `transformers`.

## Library requirements

+ `transformers>=4.22.0`
+ `accelerate>=0.12.0`
+ `bitsandbytes>=0.31.5`

## Hardware requirements

The following instructions are tested with 2 NVIDIA Tesla T4 GPUs. To run `bitsandbytes` successfully you need a GPU with 8-bit tensor core support. Note that Turing, Ampere or newer architectures (e.g. T4, RTX20s, RTX30s, A40-A100, A6000) should be supported.

## Virtual envs

```bash
conda create --name int8-testing python==3.8
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

If `transformers>=4.23.0` is not released yet, then use:

```bash
pip install git+https://github.com/huggingface/transformers.git
```

## Troubleshooting

A list of common errors:

### Torch does not correctly do the operations on GPU

First check that:

```py
import torch

vec = torch.randn(1, 2, 3).to(0)
```

works without any error. If not, install torch using `conda` like:

```bash
conda create --name int8-testing python==3.8
conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

For the latest pytorch instructions please see [this](https://pytorch.org/get-started/locally/); the snippet above should then work.

### `bitsandbytes operations are not supported under CPU!`

This happens when some `Linear` weights are set to the CPU when using `accelerate`. Please check `model.hf_device_map` carefully and make sure that no `Linear` module is assigned to CPU. It is fine to have the last module (usually the `lm_head`) set on CPU.

### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.`

Use the latest version of `accelerate` with a command such as: `pip install -U accelerate` and the problem should be solved.

### `Parameter has no attribute .CB`

Same solution as above.

### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1`

Run your script by prepending `CUDA_LAUNCH_BLOCKING=1` and you should observe an error as described in the next section.

### `CUDA illegal memory error: an illegal memory access at line...`

Check the CUDA versions with:

```bash
nvcc --version
```

and confirm it is the same version as the one detected by `bitsandbytes`. If not, run:

```bash
ls -l $CONDA_PREFIX/lib/libcudart.so
```

or

```bash
ls -l $LD_LIBRARY_PATH
```

and check whether `libcudart.so` has the correct symlink set. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't; make sure the symlink for `libcudart.so` points to the correct CUDA library. Here is an example of a badly configured CUDA installation:

`nvcc --version` gives:

![Screenshot 2022-08-15 at 15.12.23.png](https://cdn-uploads.huggingface.co/production/uploads/1660569220888-62441d1d9fdefb55a0b7d12c.png)

which means that the detected CUDA version is 11.3 but `bitsandbytes` outputs:

![image.png](https://cdn-uploads.huggingface.co/production/uploads/1660569284243-62441d1d9fdefb55a0b7d12c.png)

First check:

```bash
echo $LD_LIBRARY_PATH
```

If this contains multiple paths separated by `:`,
then you have to make sure that the correct CUDA version is set for each of them. Check the `libcudart.so` in every path (`$path`) of `LD_LIBRARY_PATH`:

```bash
ls -l $path/libcudart.so
```

If there is a single path, simply run:

```bash
ls -l $LD_LIBRARY_PATH/libcudart.so
```

and you will see something like:

![Screenshot 2022-08-15 at 15.12.33.png](https://cdn-uploads.huggingface.co/production/uploads/1660569176504-62441d1d9fdefb55a0b7d12c.png)

If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location of `libcudart.so` (e.g. with `find / -name libcudart.so`) and point the `LD_LIBRARY_PATH` environment variable to the directory containing the correct `libcudart.so` file.
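
## Final sanity check

If the environment is set up correctly, the following minimal sketch should run end to end. The checkpoint name below is only an example (any small causal LM works), and it requires a CUDA GPU with `bitsandbytes` and `accelerate` installed:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigscience/bloom-560m"  # example checkpoint, swap in any small causal LM

tokenizer = AutoTokenizer.from_pretrained(model_id)
# load_in_8bit triggers the bitsandbytes int8 path being debugged here
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_8bit=True, device_map="auto")

inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

If this generates text without a CUDA error, the `bitsandbytes` installation and CUDA setup are consistent.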
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/bnb/test_mixed_int8.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_accelerate_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class BaseMixedInt8Test(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_get_keys_to_not_convert_trust_remote_code(self): r""" Test the `get_keys_to_not_convert` function with `trust_remote_code` models. """ from accelerate import init_empty_weights from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) def test_get_keys_to_not_convert(self): r""" Test the `get_keys_to_not_convert` function. """ from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) # The order of the keys does not matter, so we sort them before comparing, same for the other tests. self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "transformer.wte"].sort()) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["language_model.lm_head", "language_model.model.decoder.embed_tokens"].sort(), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "model.decoder.embed_tokens"].sort()) model_id = "roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder"].sort(), ) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() 
self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ import bitsandbytes as bnb quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Checks also if other models are casted correctly. 
""" with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertEqual(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertEqual(output_text, self.EXPECTED_OUTPUT) def test_cpu_gpu_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. 
""" device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. 
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class MixedInt8GPT2Test(MixedInt8Test): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUT = "Hello my name is John Doe, and I'm a big fan of" def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
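

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the test suite, guarded so it never runs
# on import): the int8 loading pattern exercised by the tests above. The
# checkpoint name is an illustrative assumption; running it requires a CUDA
# GPU with bitsandbytes and accelerate installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    model_id = "bigscience/bloom-560m"  # example checkpoint, for illustration only
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Equivalent to passing load_in_8bit=True directly to from_pretrained.
    bnb_config = BitsAndBytesConfig(load_in_8bit=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, quantization_config=bnb_config, device_map="auto"
    )

    inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
    output = model.generate(**inputs, max_new_tokens=10)
    print(tokenizer.decode(output[0], skip_special_tokens=True))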
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/bnb/test_4bit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Base4bitTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 2.109659552692574 # This was obtained on a RTX Titan so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University") MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class Bnb4BitTest(Base4bitTest): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_quantization_num_parameters(self): r""" Test if the number of returned parameters is correct See: https://github.com/huggingface/transformers/issues/25978 """ num_params_4bit = self.model_4bit.num_parameters() num_params_fp16 = self.model_fp16.num_parameters() self.assertEqual(num_params_4bit, num_params_fp16) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_4bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE) linear = get_some_linear_layer(self.model_4bit) self.assertTrue(linear.weight.__class__ == Params4bit) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_rwkv_4bit(self): r""" A simple test to check if 4-bit RWKV inference works as expected. """ model_id = "RWKV/rwkv-4-169m-pile" quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) tok = AutoTokenizer.from_pretrained(model_id) text = "Hello my name is" input_ids = tok.encode(text, return_tensors="pt").to(0) _ = model.generate(input_ids, max_new_tokens=30) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_4bit = True model_4bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_4bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_on_save_pretrained(self): r""" Test whether trying to save a model after converting it in 8-bit will throw a warning. """ with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname: self.model_4bit.save_pretrained(tmpdirname) def test_raise_if_config_and_load_in_4bit(self): r""" Test that loading the model with the config and `load_in_4bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Checks also if other models are casted correctly. """ with self.assertRaises(ValueError): # Tries with `str` self.model_4bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_4bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Bnb4BitT5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class Classes4BitModelTest(Base4bitTest): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_4bit=True, device_map="auto" ) # CausalLM model self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_4bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_4bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Params4bit self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit) # Other heads should be nn.Parameter self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class Pipeline4BitTest(Base4bitTest): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class Bnb4bitTestMultiGpu(Base4bitTest): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_4bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) class Bnb4BitTestTraining(Base4bitTest): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class Bnb4BitGPT2Test(Bnb4BitTest): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
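

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the test suite, guarded so it never runs
# on import): a typical 4-bit NF4 configuration similar to the ones exercised
# above. The checkpoint name is an illustrative assumption; running it
# requires a CUDA GPU with bitsandbytes >= 0.39 and accelerate installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    model_id = "bigscience/bloom-560m"  # example checkpoint, for illustration only

    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",              # NF4 quantization scheme
        bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
        bnb_4bit_compute_dtype=torch.bfloat16,  # dtype used for the matmuls
    )

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, quantization_config=bnb_config, device_map="auto"
    )

    inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
    output = model.generate(**inputs, max_new_tokens=10)
    print(tokenizer.decode(output[0], skip_special_tokens=True))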
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/autoawq/test_awq.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_auto_awq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AwqConfigTest(unittest.TestCase): def test_wrong_backend(self): """ Simple test that checks if a user passes a wrong backend an error is raised """ # This should work fine _ = AwqConfig(bits=4) with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") # These should work fine _ = AwqConfig(bits=4, version="GEMM") _ = AwqConfig(bits=4, version="gemm") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="unexisting-backend") # LLMAWQ does not work on a T4 with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq") def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AwqConfig(bits=4) config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} quantization_config = AwqConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) self.assertEqual(dict["zero_point"], quantization_config.zero_point) self.assertEqual(dict["backend"], quantization_config.backend) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. 
I am a junior and I am majoring in Exercise and Sport Science with a" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) # Try with `modules_not_to_convert` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) def test_quantized_model_no_device_map(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_quantization(self): """ Simple test that checks if one passes a quantization config to 
quantize a model, it raises an error
        """
        quantization_config = AwqConfig(bits=4)

        with self.assertRaises(ValueError) as context:
            _ = AutoModelForCausalLM.from_pretrained(
                self.dummy_transformers_model_name, quantization_config=quantization_config
            )

        self.assertEqual(
            str(context.exception),
            "You cannot pass an `AwqConfig` when loading a model as you can only use AWQ models for inference. To quantize transformers models with AWQ algorithm, please refer to our quantization docs: https://huggingface.co/docs/transformers/main_classes/quantization ",
        )

    @require_torch_multi_gpu
    def test_quantized_model_multi_gpu(self):
        """
        Simple test that checks if the quantized model is working properly with multiple GPUs
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

        quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")

        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1, 2, 3})

        output = quantized_model.generate(**input_ids, max_new_tokens=40)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
0
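As a quick orientation to what AwqTest above exercises, here is a minimal, hedged usage sketch. It reuses the checkpoint name from the test and assumes a CUDA GPU plus the `autoawq` package are available; no AwqConfig is passed at load time because AWQ checkpoints carry their own quantization config, which is exactly what test_raise_quantization verifies.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "TheBloke/Mistral-7B-v0.1-AWQ"  # same checkpoint as the test above

tokenizer = AutoTokenizer.from_pretrained(model_name)
# The quantization configuration is read from the checkpoint itself.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda")

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output[0], skip_special_tokens=True))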
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tokenization/test_tokenization_fast.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" # Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment) self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) def test_tokenizer_mismatch_warning(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass def test_pretrained_model_lists(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_prepare_for_model(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_rust_tokenizer_signature(self): # PreTrainedTokenizerFast doesn't have tokenizer_file in its signature pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. 
for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("t5-base") # Enable padding tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: # Hack to save this in the tokenizer_config.json tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0 shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): # This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower. repo = "hf-internal-testing/test-two-tokenizers" # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 tokenizer = AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): # See https://github.com/huggingface/transformers/pull/12550 # and https://github.com/huggingface/tokenizers/issues/537 tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
0
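A condensed sketch of the round-trip that test_init_from_tokenizers_model above checks: padding configured on a raw `tokenizers.Tokenizer` survives wrapping in `PreTrainedTokenizerFast` and a save/reload cycle. It assumes the `tokenizers` library is installed and the `t5-base` tokenizer is reachable on the Hub.

import tempfile

from tokenizers import Tokenizer
from transformers import PreTrainedTokenizerFast

backend = Tokenizer.from_pretrained("t5-base")
# Configure padding on the Rust backend directly.
backend.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8)

fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=backend)

with tempfile.TemporaryDirectory() as tmp_dir:
    fast_tokenizer.save_pretrained(tmp_dir)
    reloaded = PreTrainedTokenizerFast.from_pretrained(tmp_dir)

# The padding setup is preserved both in memory and after reloading.
for tok in (fast_tokenizer, reloaded):
    assert tok.pad_token == "<pad>"
    assert tok.padding_side == "right"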
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/tokenization/test_tokenization_utils.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ isort:skip_file """ import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow if is_tokenizers_available(): from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are potentially correctly restored if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the keys are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): import numpy as np tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") # Python no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"): 
self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", 
prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, 
tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
0
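The two utilities these tests revolve around can be summarized in a short sketch (assuming numpy and the `bert-base-cased` tokenizer are available): `tokenizer.pad` pads already-encoded features, and `BatchEncoding.convert_to_tensors` turns a plain encoding into framework tensors after the fact.

import numpy as np

from transformers import BatchEncoding, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

# Pad ragged, pre-encoded features; the shorter sequence is filled with pad_token_id.
features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
batch = tokenizer.pad(features, padding=True, return_tensors="np")
assert batch["input_ids"].tolist() == [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]

# Convert a plain BatchEncoding to numpy tensors, adding a batch axis for a single example.
single = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
tensors = single.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
assert tensors["inputs"].shape == (1, 3)
assert tensors["labels"].shape == (1,)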
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_check_dummies.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers") DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")') self.assertIsNone(no_backend) simple_backend = find_backend(" if not is_tokenizers_available():") self.assertEqual(simple_backend, "tokenizers") backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):") self.assertEqual(double_backend, "sentencepiece_and_tokenizers") double_backend_with_underscore = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("tensorflow_text", objects) self.assertIn("sentencepiece_and_tokenizers", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertModel", objects["tf"]) self.assertIn("FlaxBertModel", objects["flax"]) self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertTokenizer", objects["tensorflow_text"]) self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ dummy_class = 
create_dummy_object("FakeClass", "'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
0
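For context on what create_dummy_files generates, the expected file in the test above boils down to the pattern below: every backend-gated object is replaced by a placeholder that raises an informative error when the backend is missing. This standalone rendering imports the helpers from `transformers.utils`; the generated files themselves use a relative import.

from transformers.utils import DummyObject, requires_backends

CONSTANT = None


def function(*args, **kwargs):
    # Raises an informative ImportError if torch is not installed.
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])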
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_check_docstrings.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) from check_docstrings import get_default_description, replace_default_in_arg_description # noqa: E402 class CheckDostringsTested(unittest.TestCase): def test_replace_default_in_arg_description(self): # Standard docstring with default. desc_with_default = "`float`, *optional*, defaults to 2.0" self.assertEqual( replace_default_in_arg_description(desc_with_default, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_default, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_default, inspect._empty), "`float`") # Standard docstring with default but optional is not using the stars. desc_with_default_typo = "`float`, `optional`, defaults to 2.0" self.assertEqual( replace_default_in_arg_description(desc_with_default_typo, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_default_typo, 1.0), "`float`, *optional*, defaults to 1.0" ) # If the default is None we do not erase the value in the docstring. self.assertEqual( replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*, defaults to 2.0" ) # If the default is None (and set as such in the docstring), we do not include it. desc_with_default = "`float`, *optional*, defaults to None" self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*") desc_with_default = "`float`, *optional*, defaults to `None`" self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*") # Operations are not replaced, but put in backtiks. 
desc_with_default = "`float`, *optional*, defaults to 1/255" self.assertEqual( replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`" ) desc_with_default = "`float`, *optional*, defaults to `1/255`" self.assertEqual( replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`" ) desc_with_optional = "`float`, *optional*" self.assertEqual( replace_default_in_arg_description(desc_with_optional, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_optional, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_optional, None), "`float`, *optional*") self.assertEqual(replace_default_in_arg_description(desc_with_optional, inspect._empty), "`float`") desc_with_no_optional = "`float`" self.assertEqual( replace_default_in_arg_description(desc_with_no_optional, 2.0), "`float`, *optional*, defaults to 2.0" ) self.assertEqual( replace_default_in_arg_description(desc_with_no_optional, 1.0), "`float`, *optional*, defaults to 1.0" ) self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, None), "`float`, *optional*") self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, inspect._empty), "`float`") def test_get_default_description(self): # Fake function to have arguments to test. def _fake_function(a, b: int, c=1, d: float = 2.0, e: str = "blob"): pass params = inspect.signature(_fake_function).parameters assert get_default_description(params["a"]) == "`<fill_type>`" assert get_default_description(params["b"]) == "`int`" assert get_default_description(params["c"]) == "`<fill_type>`, *optional*, defaults to 1" assert get_default_description(params["d"]) == "`float`, *optional*, defaults to 2.0" assert get_default_description(params["e"]) == '`str`, *optional*, defaults to `"blob"`'
0
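The descriptions this checker builds and compares follow a fixed convention, illustrated below on a hypothetical function invented purely for the example: the type goes in backticks, optionality is marked with *optional*, and the stated default must match the signature, mirroring what get_default_description produces in the assertions above.

def resize(image, factor: float = 2.0, mode: str = "blob"):
    """
    Resize an image (hypothetical example; only the docstring convention matters).

    Args:
        image (`<fill_type>`):
            The image to resize. No annotation and no default, so only a type placeholder is emitted.
        factor (`float`, *optional*, defaults to 2.0):
            Scaling factor applied to both dimensions.
        mode (`str`, *optional*, defaults to `"blob"`):
            Resampling mode; string defaults are quoted inside backticks.
    """
    ...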
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_tests_fetcher.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path from git import Repo from transformers.testing_utils import CaptureStdout REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(REPO_PATH, "utils")) import tests_fetcher # noqa: E402 from tests_fetcher import ( # noqa: E402 checkout_commit, clean_code, create_module_to_test_map, create_reverse_dependency_map, create_reverse_dependency_tree, diff_is_docstring_only, extract_imports, get_all_tests, get_diff, get_module_dependencies, get_tree_starting_at, infer_tests_to_run, init_test_examples_dependencies, parse_commit_message, print_tree_deps_of, ) BERT_MODELING_FILE = "src/transformers/models/bert/modeling_bert.py" BERT_MODEL_FILE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code """ BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. It has been updated. ''' This is the code """ BERT_MODEL_FILE_NEW_CODE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code. It has been updated """ def create_tmp_repo(tmp_dir, models=None): """ Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models provided (which defaults to just `["bert"]`). 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) repo = Repo.init(tmp_dir) if models is None: models = ["bert"] class_names = [model[0].upper() + model[1:] for model in models] transformers_dir = tmp_dir / "src" / "transformers" transformers_dir.mkdir(parents=True, exist_ok=True) with open(transformers_dir / "__init__.py", "w") as f: init_lines = ["from .utils import cached_file, is_torch_available"] init_lines.extend( [f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)] ) f.write("\n".join(init_lines) + "\n") with open(transformers_dir / "configuration_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") with open(transformers_dir / "modeling_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") utils_dir = tmp_dir / "src" / "transformers" / "utils" utils_dir.mkdir(exist_ok=True) with open(utils_dir / "__init__.py", "w") as f: f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n") with open(utils_dir / "hub.py", "w") as f: f.write("import huggingface_hub\n\ncode") with open(utils_dir / "imports.py", "w") as f: f.write("code") model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("\n".join([f"import {model}" for model in models])) for model, cls in zip(models, class_names): model_dir = tmp_dir / "src" / "transformers" / "models" / model model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n") with open(model_dir / f"configuration_{model}.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / f"modeling_{model}.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls) f.write(modeling_code) test_dir = tmp_dir / "tests" test_dir.mkdir(exist_ok=True) with open(test_dir / "test_modeling_common.py", "w") as f: f.write("from transformers.modeling_utils import PreTrainedModel\ncode") for model, cls in zip(models, class_names): test_model_dir = test_dir / "models" / model test_model_dir.mkdir(parents=True, exist_ok=True) (test_model_dir / "__init__.py").touch() with open(test_model_dir / f"test_modeling_{model}.py", "w") as f: f.write( f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) example_dir = tmp_dir / "examples" example_dir.mkdir(exist_ok=True) for framework in ["flax", "pytorch", "tensorflow"]: framework_dir = example_dir / framework framework_dir.mkdir(exist_ok=True) with open(framework_dir / f"test_{framework}_examples.py", "w") as f: f.write("""test_args = "run_glue.py"\n""") glue_dir = framework_dir / "text-classification" glue_dir.mkdir(exist_ok=True) with open(glue_dir / "run_glue.py", "w") as f: f.write("from transformers import BertModel\n\ncode") repo.index.add(["examples", "src", "tests"]) repo.index.commit("Initial commit") repo.create_head("main") repo.head.reference = repo.refs.main repo.delete_head("master") return repo @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `tests_fetcher` to use a different location for the repo. 
""" old_repo_path = tests_fetcher.PATH_TO_REPO tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve() tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" try: yield finally: tests_fetcher.PATH_TO_REPO = old_repo_path tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" def commit_changes(filenames, contents, repo, commit_message="Commit"): """ Commit new `contents` to `filenames` inside a given `repo`. """ if not isinstance(filenames, list): filenames = [filenames] if not isinstance(contents, list): contents = [contents] folder = Path(repo.working_dir) for filename, content in zip(filenames, contents): with open(folder / filename, "w") as f: f.write(content) repo.index.add(filenames) commit = repo.index.commit(commit_message) return commit.hexsha class TestFetcherTester(unittest.TestCase): def test_checkout_commit(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_sha = repo.head.commit.hexsha new_sha = commit_changes(BERT_MODELING_FILE, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert repo.head.commit.hexsha == new_sha with checkout_commit(repo, initial_sha): assert repo.head.commit.hexsha == initial_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE assert repo.head.commit.hexsha == new_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE_NEW_DOCSTRING def test_clean_code(self): # Clean code removes all strings in triple quotes assert clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n') == "code\ncode" assert clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''") == "code\ncode" # Clean code removes all comments assert clean_code("code\n# Comment\ncode") == "code\ncode" assert clean_code("code # inline comment\ncode") == "code \ncode" def test_get_all_tests(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): assert get_all_tests() == ["tests/models/bert", "tests/test_modeling_common.py"] def test_get_all_tests_on_full_repo(self): all_tests = get_all_tests() assert "tests/models/albert" in all_tests assert "tests/models/bert" in all_tests assert "tests/repo_utils" in all_tests assert "tests/test_pipeline_mixin.py" in all_tests assert "tests/models" not in all_tests assert "tests/__pycache__" not in all_tests assert "tests/models/albert/test_modeling_albert.py" not in all_tests assert "tests/repo_utils/test_tests_fetcher.py" not in all_tests def test_diff_is_docstring_only(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) branching_point = repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert diff_is_docstring_only(repo, branching_point, bert_file) commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert not diff_is_docstring_only(repo, branching_point, bert_file) def test_get_diff(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_commit = 
repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING + "\n# Adding a comment\n", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [ "src/transformers/models/bert/modeling_bert.py" ] commit_changes("src/transformers/utils/hub.py", "import huggingface_hub\n\nnew code", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == ["src/transformers/utils/hub.py"] assert get_diff(repo, repo.head.commit, [initial_commit]) == [ "src/transformers/models/bert/modeling_bert.py", "src/transformers/utils/hub.py", ] def test_extract_imports_relative(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_imports = [ ("src/transformers/modeling_utils.py", ["PreTrainedModel"]), ("src/transformers/utils/__init__.py", ["is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] expected_utils_imports = [ ("src/transformers/utils/hub.py", ["cached_file"]), ("src/transformers/utils/imports.py", ["is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports assert extract_imports("src/transformers/utils/__init__.py") == expected_utils_imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import cached_file, is_torch_available\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import (\n cached_file,\n is_torch_available\n)\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_extract_imports_absolute(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import cached_file, is_torch_available\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), 
("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with base imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/__init__.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_get_module_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies expected_test_bert_dependencies = [ "tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("tests/models/bert/test_modeling_bert.py") == expected_test_bert_dependencies ) # Test with a submodule (tmp_folder / "src/transformers/utils/logging.py").touch() with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import logging\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/logging.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an object non-imported in the init create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import CONSTANT\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/__init__.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an example create_tmp_repo(tmp_folder) expected_example_dependencies = ["src/transformers/models/bert/modeling_bert.py"] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("examples/pytorch/text-classification/run_glue.py") == expected_example_dependencies ) def test_create_reverse_dependency_tree(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): tree = create_reverse_dependency_tree() init_edges = [ "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "src/transformers/__init__.py"} == set(init_edges) bert_edges = [ "src/transformers/modeling_utils.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", ] assert {f for f, g in tree if g == "src/transformers/models/bert/modeling_bert.py"} == set(bert_edges) test_bert_edges = [ 
"tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "tests/models/bert/test_modeling_bert.py"} == set(test_bert_edges) def test_get_tree_starting_at(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): edges = create_reverse_dependency_tree() bert_tree = get_tree_starting_at("src/transformers/models/bert/modeling_bert.py", edges) config_utils_tree = get_tree_starting_at("src/transformers/configuration_utils.py", edges) expected_bert_tree = [ "src/transformers/models/bert/modeling_bert.py", [("src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py")], ] assert bert_tree == expected_bert_tree expected_config_tree = [ "src/transformers/configuration_utils.py", [("src/transformers/configuration_utils.py", "src/transformers/models/bert/configuration_bert.py")], [ ("src/transformers/models/bert/configuration_bert.py", "tests/models/bert/test_modeling_bert.py"), ( "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ), ], ] # Order of the edges is random assert [set(v) for v in config_utils_tree] == [set(v) for v in expected_config_tree] def test_print_tree_deps_of(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) # There are two possible outputs since the order of the last two lines is non-deterministic. expected_std_out = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py""" expected_std_out_2 = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py tests/models/bert/test_modeling_bert.py src/transformers/models/bert/modeling_bert.py""" with patch_transformer_repo_path(tmp_folder), CaptureStdout() as cs: print_tree_deps_of("src/transformers/models/bert/modeling_bert.py") print_tree_deps_of("src/transformers/configuration_utils.py") assert cs.out.strip() in [expected_std_out, expected_std_out_2] def test_init_test_examples_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_example_deps = { "examples/flax/test_flax_examples.py": [ "examples/flax/text-classification/run_glue.py", "examples/flax/test_flax_examples.py", ], "examples/pytorch/test_pytorch_examples.py": [ "examples/pytorch/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", ], "examples/tensorflow/test_tensorflow_examples.py": [ "examples/tensorflow/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", ], } expected_examples = { "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } with patch_transformer_repo_path(tmp_folder): example_deps, all_examples = init_test_examples_dependencies() assert example_deps == expected_example_deps assert 
{str(f.relative_to(tmp_folder)) for f in all_examples} == expected_examples def test_create_reverse_dependency_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # impact of BERT modeling file (note that we stop at the inits and don't go down further) expected_bert_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/__init__.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/modeling_bert.py"]) == expected_bert_deps # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/utils/__init__.py", "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/configuration_utils.py", "src/transformers/modeling_utils.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/__init__.py"]) == expected_init_deps expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps # Test that with more models init of bert only gets deps to bert. 
create_tmp_repo(tmp_folder, models=["bert", "gpt2"]) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps def test_create_module_to_test_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] create_tmp_repo(tmp_folder, models=models) with patch_transformer_repo_path(tmp_folder): test_map = create_module_to_test_map(filter_models=True) expected_bert_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/models/bert/test_modeling_bert.py", } for model in models: if model != "bert": assert test_map[f"src/transformers/models/{model}/modeling_{model}.py"] == [ f"tests/models/{model}/test_modeling_{model}.py" ] else: assert set(test_map[f"src/transformers/models/{model}/modeling_{model}.py"]) == expected_bert_tests # Init got filtered expected_init_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", } assert set(test_map["src/transformers/__init__.py"]) == expected_init_tests def test_infer_tests_to_run(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes("src/transformers/models/bert/modeling_bert.py", BERT_MODEL_FILE_NEW_CODE, repo) example_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", } with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" assert set(example_tests_to_run.split(" ")) == example_tests # Fake a new model addition repo = create_tmp_repo(tmp_folder, models=models) branch = repo.create_head("new_model") branch.checkout() with open(tmp_folder / "src/transformers/__init__.py", "a") as f: f.write("from .models.t5 import T5Config, T5Model\n") model_dir = tmp_folder / "src/transformers/models/t5" model_dir.mkdir(exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("from .configuration_t5 import T5Config\nfrom .modeling_t5 import T5Model\n") with open(model_dir / "configuration_t5.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / "modeling_t5.py", "w") as f: modeling_code =
BERT_MODEL_FILE.replace("bert", "t5").replace("Bert", "T5") f.write(modeling_code) test_dir = tmp_folder / "tests/models/t5" test_dir.mkdir(exist_ok=True) (test_dir / "__init__.py").touch() with open(test_dir / "test_modeling_t5.py", "w") as f: f.write( "from transformers import T5Config, T5Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) repo.index.add(["src", "tests"]) repo.index.commit("Add T5 model") with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt") with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = { "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", "tests/models/t5/test_modeling_t5.py", "tests/test_modeling_common.py", } assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", filter_models=False) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = [f"tests/models/{name}/test_modeling_{name}.py" for name in models + ["t5"]] expected_tests = set(expected_tests + ["tests/test_modeling_common.py"]) assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests def test_infer_tests_to_run_with_test_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "tests/models/bert/test_modeling_bert.py", "from transformers import BertConfig, BertModel\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" def test_infer_tests_to_run_with_examples_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] repo = create_tmp_repo(tmp_folder, models=models) # Modification in one example triggers the corresponding test commit_changes( "examples/pytorch/text-classification/run_glue.py", "from transformers import BertModeln\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" # Modification in one test example file triggers that test repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "examples/pytorch/test_pytorch_examples.py", """test_args = "run_glue.py"\nmore_code""", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" def test_parse_commit_message(self): assert parse_commit_message("Normal commit") == {"skip": False,
"no_filter": False, "test_all": False} assert parse_commit_message("[skip ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[ci skip] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip-ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip_ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[no filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no-filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no_filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[filter-no] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[test all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all test] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[test-all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all_test] commit") == {"skip": False, "no_filter": False, "test_all": True}
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_check_copies.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 from check_copies import convert_to_localized_md, find_code_in_transformers, is_copy_consistent # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. REFERENCE_CODE = """ def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states """ MOCK_BERT_CODE = """from ...modeling_utils import PreTrainedModel def bert_function(x): return x class BertAttention(nn.Module): def __init__(self, config): super().__init__() class BertModel(BertPreTrainedModel): def __init__(self, config): super().__init__() self.bert = BertEncoder(config) @add_docstring(BERT_DOCSTRING) def forward(self, x): return self.bert(x) """ MOCK_BERT_COPY_CODE = """from ...modeling_utils import PreTrainedModel # Copied from transformers.models.bert.modeling_bert.bert_function def bert_copy_function(x): return x # Copied from transformers.models.bert.modeling_bert.BertAttention class BertCopyAttention(nn.Module): def __init__(self, config): super().__init__() # Copied from transformers.models.bert.modeling_bert.BertModel with Bert->BertCopy all-casing class BertCopyModel(BertCopyPreTrainedModel): def __init__(self, config): super().__init__() self.bertcopy = BertCopyEncoder(config) @add_docstring(BERTCOPY_DOCSTRING) def forward(self, x): return self.bertcopy(x) """ def replace_in_file(filename, old, new): with open(filename, "r", encoding="utf-8") as f: content = f.read() content = content.replace(old, new) with open(filename, "w", encoding="utf-8") as f: f.write(content) def create_tmp_repo(tmp_dir): """ Creates a mock repository in a temporary folder for testing. 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) models = {"bert": MOCK_BERT_CODE, "bertcopy": MOCK_BERT_COPY_CODE} for model, code in models.items(): model_subdir = model_dir / model model_subdir.mkdir(exist_ok=True) with open(model_subdir / f"modeling_{model}.py", "w", encoding="utf-8") as f: f.write(code) @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `check_copies` to use a different location for the repo. """ old_repo_path = check_copies.REPO_PATH old_doc_path = check_copies.PATH_TO_DOCS old_transformer_path = check_copies.TRANSFORMERS_PATH repo_path = Path(new_folder).resolve() check_copies.REPO_PATH = str(repo_path) check_copies.PATH_TO_DOCS = str(repo_path / "docs" / "source" / "en") check_copies.TRANSFORMERS_PATH = str(repo_path / "src" / "transformers") try: yield finally: check_copies.REPO_PATH = old_repo_path check_copies.PATH_TO_DOCS = old_doc_path check_copies.TRANSFORMERS_PATH = old_transformer_path class CopyCheckTester(unittest.TestCase): def test_find_code_in_transformers(self): with tempfile.TemporaryDirectory() as tmp_folder: create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): code = find_code_in_transformers("models.bert.modeling_bert.BertAttention") reference_code = ( "class BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n" ) self.assertEqual(code, reference_code) def test_is_copy_consistent(self): path_to_check = ["src", "transformers", "models", "bertcopy", "modeling_bertcopy.py"] with tempfile.TemporaryDirectory() as tmp_folder: # Base check create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, []) # Base check with an inconsistency create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) replace_in_file(file_to_check, "self.bertcopy(x)", "self.bert(x)") diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, [["models.bert.modeling_bert.BertModel", 22]]) diffs = is_copy_consistent(file_to_check, overwrite=True) with open(file_to_check, "r", encoding="utf-8") as f: self.assertEqual(f.read(), MOCK_BERT_COPY_CODE) def test_convert_to_localized_md(self): localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"] md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) localized_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. Manning 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( md_list, localized_md_list, localized_readme["format_model_list"] ) self.assertFalse(num_models_equal) self.assertEqual(converted_md_list, converted_md_list_sample) num_models_equal, converted_md_list = convert_to_localized_md( md_list, converted_md_list, localized_readme["format_model_list"] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(num_models_equal) link_changed_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) link_unchanged_md_list = ( "1. 
**[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"] ) # Check if the model link is synchronized. self.assertEqual(converted_md_list, converted_md_list_sample)
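# For reference, the `# Copied from ... with Bert->BertCopy all-casing` marker in
# MOCK_BERT_COPY_CODE means the checker re-derives the copy from the source by
# string substitution. A minimal sketch of that substitution (hypothetical; the
# real `is_copy_consistent` in utils/check_copies.py does more than this):
def _apply_copy_pattern(source_code, old, new):
    # The "all-casing" option also maps the lowercase and uppercase variants,
    # e.g. self.bert -> self.bertcopy and BERT_DOCSTRING -> BERTCOPY_DOCSTRING.
    replaced = source_code.replace(old, new)
    replaced = replaced.replace(old.lower(), new.lower())
    replaced = replaced.replace(old.upper(), new.upper())
    return replaced


# Example: _apply_copy_pattern("class BertModel(BertPreTrainedModel): self.bert", "Bert", "BertCopy")
# yields "class BertCopyModel(BertCopyPreTrainedModel): self.bertcopy".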
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_get_test_info.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py") BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class GetTestInfoTester(unittest.TestCase): def test_get_test_to_tester_mapping(self): bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE) blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"} EXPECTED_BLIP_MAPPING = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_test_mapping(self): bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE) blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_tester_mapping(self): bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE) blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], 
"BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these models, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest("No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a couple of images # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching.
[ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 
0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, 
{"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. # That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], )
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_text.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: is_torch_greater_or_equal_than_1_11 = False if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, processor): pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", 
model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? 
(1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_tf def test_large_model_tf(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], )
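# As the conditional-generation tests above show, BLIP-style models prepend the
# prompt to the generated caption, and passing a list of prompts alongside a
# list of images raises a ValueError. A minimal sketch of that argument check
# (hypothetical; the real validation happens inside the pipeline's preprocess):
def _check_prompt(images, prompt):
    # Only a single string prompt is supported, even when images are batched.
    if prompt is not None and not isinstance(prompt, str):
        raise ValueError("The prompt must be a single string, not a list.")
    return prompt


# Example: _check_prompt([image, image], "a photo of") passes through, while
# _check_prompt([image, image], ["a photo of", "a photo of"]) raises ValueError.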
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_table_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, AutoModelForTableQuestionAnswering, AutoTokenizer, TableQuestionAnsweringPipeline, TFAutoModelForTableQuestionAnswering, is_torch_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_pandas, require_tensorflow_probability, require_tf, require_torch, slow, ) if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False @is_pipeline_test class TQAPipelineTests(unittest.TestCase): # Putting it there for consistency, but TQA models do not have a fast tokenizer, # which is needed to generate automatic tests model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING @require_tensorflow_probability @require_pandas @require_tf @require_torch def test_small_model_tf(self): model_id = "lysandre/tiny-tapas-random-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_small_model_pt(self): model_id = "lysandre/tiny-tapas-random-wtq" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the 
total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_slow_tokenizer_sqa_pt(self): model_id = "lysandre/tiny-tapas-random-sqa" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", 
"how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_tf @require_tensorflow_probability @require_pandas @require_torch def test_slow_tokenizer_sqa_tf(self): model_id = "lysandre/tiny-tapas-random-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], 
batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def 
test_integration_wtq_pt(self): table_querier = pipeline("table-question-answering") data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_wtq_tf(self): model_id = "google/tapas-base-finetuned-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer) data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def test_integration_sqa_pt(self): table_querier = pipeline( "table-question-answering", model="google/tapas-base-finetuned-sqa", tokenizer="google/tapas-base-finetuned-sqa", ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has 
George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_sqa_tf(self): model_id = "google/tapas-base-finetuned-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline( "table-question-answering", model=model, tokenizer=tokenizer, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_torch def test_large_model_pt_tapex(self): model_id = "microsoft/tapex-large-finetuned-wtq" table_querier = pipeline( "table-question-answering", model=model_id, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = [ "How many movies has George Clooney played in?", "How old is Mr Clooney ?", "What's the date of birth of Leonardo ?", ] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": " 69"}, {"answer": " 59"}, {"answer": " 10 june 1996"}, ] self.assertListEqual(results, expected_results)
0
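The table-question-answering tests above exercise both aggregating (WTQ-style) and sequential (SQA-style) TAPAS checkpoints as well as the TAPEX seq2seq variant. For reference, a minimal usage sketch of the same public API follows; it assumes hub access and reuses the google/tapas-base-finetuned-wtq checkpoint from the slow integration test, while the table and query below are illustrative placeholders rather than fixtures from the tests.

from transformers import pipeline

# Illustrative table: any dict of equal-length string columns is accepted.
table = {
    "Repository": ["Transformers", "Datasets", "Tokenizers"],
    "Stars": ["36542", "4512", "3934"],
}

# Checkpoint taken from the slow WTQ integration test above.
tqa = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")

# WTQ checkpoints return the decoded answer, the cell coordinates, the raw cell
# values and the predicted aggregator (NONE, COUNT, SUM or AVERAGE).
result = tqa(table=table, query="What is the total amount of stars?")
print(result["answer"], result["coordinates"], result["aggregator"])

As the ValueError checks in the tests assert, both a non-empty table and a non-empty query are required; passing None, an empty string, or an empty dict raises immediately.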
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from datasets import Audio, load_dataset from huggingface_hub import hf_hub_download, snapshot_download from transformers import ( MODEL_FOR_CTC_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, Speech2TextForConditionalGeneration, Wav2Vec2ForCTC, WhisperForConditionalGeneration, ) from transformers.pipelines import AutomaticSpeechRecognitionPipeline, pipeline from transformers.pipelines.audio_utils import chunk_bytes_iter from transformers.pipelines.automatic_speech_recognition import _find_timestamp_sequence, chunk_iter from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_pyctcdecode, require_tf, require_torch, require_torch_accelerator, require_torchaudio, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch # We can't use this mixin because it assumes TF support. # from .test_pipelines_common import CustomInputPipelineCommonMixin @is_pipeline_test class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else []) + (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None: # Side effect of no Fast Tokenizer class for these model, so skipping # But the slow tokenizer test should still run as they're quite small self.skipTest("No tokenizer available") return # return None, None speech_recognizer = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=processor ) # test with a raw waveform audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return speech_recognizer, [audio, audio2] def run_pipeline_test(self, speech_recognizer, examples): audio = np.zeros((34000,)) outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) # Striding audio = {"raw": audio, "stride": (0, 4000), "sampling_rate": speech_recognizer.feature_extractor.sampling_rate} if speech_recognizer.type == "ctc": outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) elif "Whisper" in speech_recognizer.model.__class__.__name__: outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) else: # Non CTC models cannot use striding. 
with self.assertRaises(ValueError): outputs = speech_recognizer(audio) # Timestamps audio = np.zeros((34000,)) if speech_recognizer.type == "ctc": outputs = speech_recognizer(audio, return_timestamps="char") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) outputs = speech_recognizer(audio, return_timestamps="word") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) elif "Whisper" in speech_recognizer.model.__class__.__name__: outputs = speech_recognizer(audio, return_timestamps=True) self.assertIsInstance(outputs["chunks"], list) nb_chunks = len(outputs["chunks"]) self.assertGreater(nb_chunks, 0) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(nb_chunks)], }, ) else: # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): outputs = speech_recognizer(audio, return_timestamps="char") @require_torch @slow def test_pt_defaults(self): pipeline("automatic-speech-recognition", framework="pt") @require_torch def test_small_model_pt(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-small-mustc-en-fr-st", tokenizer="facebook/s2t-small-mustc-en-fr-st", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "(Applaudissements)"}) output = speech_recognizer(waveform, chunk_length_s=10) self.assertEqual(output, {"text": "(Applaudissements)"}) # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): _ = speech_recognizer(waveform, return_timestamps="char") @slow @require_torch_accelerator def test_whisper_fp16(self): speech_recognizer = pipeline( model="openai/whisper-base", device=torch_device, torch_dtype=torch.float16, ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) speech_recognizer(waveform) @require_torch def test_small_model_pt_seq2seq(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"}) @require_torch def test_small_model_pt_seq2seq_gen_kwargs(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform, max_new_tokens=10, generate_kwargs={"num_beams": 2}) self.assertEqual(output, {"text": "あл † γ ت ב オ 束 泣 足"}) @slow @require_torch @require_pyctcdecode def test_large_model_pt_with_lm(self): dataset = load_dataset("Narsil/asr_dummy", streaming=True) third_item = next(iter(dataset["test"].skip(3))) filename = third_item["file"] speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm", framework="pt", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") output = 
speech_recognizer(filename) self.assertEqual( output, {"text": "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumaje"}, ) # Override back to pure CTC speech_recognizer.type = "ctc" output = speech_recognizer(filename) # plumajre != plumaje self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajre" ) }, ) speech_recognizer.type = "ctc_with_lm" # Simple test with CTC with LM, chunking + timestamps output = speech_recognizer(filename, chunk_length_s=2.0, return_timestamps="word") self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajcri" ), "chunks": [ {"text": "y", "timestamp": (0.52, 0.54)}, {"text": "en", "timestamp": (0.6, 0.68)}, {"text": "las", "timestamp": (0.74, 0.84)}, {"text": "ramas", "timestamp": (0.94, 1.24)}, {"text": "medio", "timestamp": (1.32, 1.52)}, {"text": "sumergidas", "timestamp": (1.56, 2.22)}, {"text": "revoloteaban", "timestamp": (2.36, 3.0)}, {"text": "algunos", "timestamp": (3.06, 3.38)}, {"text": "pájaros", "timestamp": (3.46, 3.86)}, {"text": "de", "timestamp": (3.92, 4.0)}, {"text": "quimérico", "timestamp": (4.08, 4.6)}, {"text": "y", "timestamp": (4.66, 4.68)}, {"text": "legendario", "timestamp": (4.74, 5.26)}, {"text": "plumajcri", "timestamp": (5.34, 5.74)}, ], }, ) # CTC + LM models cannot use return_timestamps="char" with self.assertRaisesRegex( ValueError, "^CTC with LM can only predict word level timestamps, set `return_timestamps='word'`$" ): _ = speech_recognizer(filename, return_timestamps="char") @require_tf def test_small_model_tf(self): self.skipTest("Tensorflow not supported yet.") @require_torch def test_torch_small_no_tokenizer_files(self): # test that model without tokenizer file cannot be loaded with pytest.raises(OSError): pipeline( task="automatic-speech-recognition", model="patrickvonplaten/tiny-wav2vec2-no-tokenizer", framework="pt", ) @require_torch @slow def test_torch_large(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-base-960h", tokenizer="facebook/wav2vec2-base-960h", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @slow @require_torch @slow def test_return_timestamps_in_preprocess(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=8, stride_length_s=1, ) data = load_dataset("librispeech_asr", "clean", split="test", streaming=True) sample = next(iter(data)) pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language="en", task="transcribe") res = pipe(sample["audio"]["array"]) self.assertEqual(res, {"text": " Conquered returned to its place amidst the tents."}) res = pipe(sample["audio"]["array"], return_timestamps=True) self.assertEqual( res, { "text": " Conquered returned to its place amidst the tents.", "chunks": [{"timestamp": (0.0, 3.36), "text": " Conquered returned to its place amidst the tents."}], }, ) pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] res = pipe(sample["audio"]["array"], return_timestamps="word") # 
fmt: off self.assertEqual( res, { 'text': ' Conquered returned to its place amidst the tents.', 'chunks': [ {'text': ' Conquered', 'timestamp': (0.5, 1.2)}, {'text': ' returned', 'timestamp': (1.2, 1.64)}, {'text': ' to', 'timestamp': (1.64, 1.84)}, {'text': ' its', 'timestamp': (1.84, 2.02)}, {'text': ' place', 'timestamp': (2.02, 2.28)}, {'text': ' amidst', 'timestamp': (2.28, 2.8)}, {'text': ' the', 'timestamp': (2.8, 2.98)}, {'text': ' tents.', 'timestamp': (2.98, 3.48)}, ], }, ) # fmt: on @require_torch def test_return_timestamps_in_init(self): # segment-level timestamps are accepted model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-tiny") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny") dummy_speech = np.ones(100) pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps=True, ) _ = pipe(dummy_speech) # word-level timestamps are accepted pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="word", ) _ = pipe(dummy_speech) # char-level timestamps are not accepted with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. " "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="char", ) _ = pipe(dummy_speech) @require_torch @slow def test_torch_whisper(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) output = speech_recognizer([filename], chunk_length_s=5, batch_size=4) self.assertEqual(output, [{"text": " A man said to the universe, Sir, I exist."}]) @slow def test_find_longest_common_subsequence(self): max_source_positions = 1500 processor = AutoProcessor.from_pretrained("openai/whisper-tiny") previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]] self.assertEqual( processor.decode(previous_sequence[0], output_offsets=True), { "text": " not worth thinking about.", "offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}], }, ) # Merge when the previous sequence is a suffix of the next sequence # fmt: off next_sequences_1 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on self.assertEqual( processor.decode(next_sequences_1[0], output_offsets=True), { "text": ( " of spectators, retrievality is not worth thinking about. 
His instant panic was followed by a" " small, sharp blow high on his chest.<|endoftext|>" ), "offsets": [ {"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (5.0, 9.4), }, ], }, ) merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 27.5)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (27.5, 31.900000000000002), }, ], }, ) # Merge when the sequence is in the middle of the 1st next sequence # fmt: off next_sequences_2 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on # {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on" " his chest." ), "timestamp": (22.56, 31.900000000000002), }, ], }, ) # Merge when the previous sequence is not included in the current sequence next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip # {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832], ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." 
), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 29.36), }, ], }, ) # last case is when the sequence is not in the first next predicted start and end of timestamp next_sequences_3 = [ [50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934] ] # fmt: skip merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912] ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 30.96), }, ], }, ) @slow @require_torch def test_whisper_timestamp_prediction(self): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") array = np.concatenate( [ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]] ) pipe = pipeline( model="openai/whisper-small", return_timestamps=True, ) output = pipe(ds[40]["audio"]) self.assertDictEqual( output, { "text": " A man said to the universe, Sir, I exist.", "chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.26)}], }, ) output = pipe(array, chunk_length_s=10) self.assertDictEqual( nested_simplify(output), { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment he wore, the " "cut" ), "timestamp": (5.5, 11.95), }, { "text": ( " on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him " "with" ), "timestamp": (11.95, 19.61), }, { "text": " the thousands of spectators, retrievality is not worth thinking about.", "timestamp": (19.61, 25.0), }, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (25.0, 29.4), }, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore, the cut on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him with the " "thousands of spectators, retrievality is not worth thinking about. " "His instant panic was followed by a small, sharp blow high on his " "chest." 
), }, ) output = pipe(array) self.assertDictEqual( output, { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment" ), "timestamp": (5.5, 10.18), }, {"text": " he wore.", "timestamp": (10.18, 11.68)}, {"text": " The cut on his chest still dripping blood.", "timestamp": (11.68, 14.92)}, {"text": " The ache of his overstrained eyes.", "timestamp": (14.92, 17.6)}, { "text": ( " Even the soaring arena around him with the thousands of spectators were trivialities" ), "timestamp": (17.6, 22.56), }, {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore. The cut on his chest still dripping blood. The ache of his " "overstrained eyes. Even the soaring arena around him with the " "thousands of spectators were trivialities not worth thinking about." ), }, ) @require_torch @slow def test_torch_speech_encoder_decoder(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": 'Ein Mann sagte zum Universum : " Sir, ich existiert! "'}) @slow @require_torch def test_simple_wav2vec2(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @slow @require_torch @require_torchaudio def test_simple_s2t(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st") tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": "(Applausi)"}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) @slow @require_torch @require_torchaudio def 
test_simple_whisper_asr(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) output = speech_recognizer(filename, return_timestamps=True) self.assertEqual( output, { "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.", "chunks": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 5.44), } ], }, ) speech_recognizer.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] output = speech_recognizer(filename, return_timestamps="word") # fmt: off self.assertEqual( output, { 'text': ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.', 'chunks': [ {'text': ' Mr.', 'timestamp': (0.38, 1.04)}, {'text': ' Quilter', 'timestamp': (1.04, 1.18)}, {'text': ' is', 'timestamp': (1.18, 1.44)}, {'text': ' the', 'timestamp': (1.44, 1.58)}, {'text': ' apostle', 'timestamp': (1.58, 1.98)}, {'text': ' of', 'timestamp': (1.98, 2.32)}, {'text': ' the', 'timestamp': (2.32, 2.46)}, {'text': ' middle', 'timestamp': (2.46, 2.56)}, {'text': ' classes,', 'timestamp': (2.56, 3.4)}, {'text': ' and', 'timestamp': (3.4, 3.54)}, {'text': ' we', 'timestamp': (3.54, 3.62)}, {'text': ' are', 'timestamp': (3.62, 3.72)}, {'text': ' glad', 'timestamp': (3.72, 4.0)}, {'text': ' to', 'timestamp': (4.0, 4.26)}, {'text': ' welcome', 'timestamp': (4.26, 4.56)}, {'text': ' his', 'timestamp': (4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.84)} ] } ) # fmt: on # Whisper can only predict segment level timestamps or word level, not character level with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. 
" "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): _ = speech_recognizer(filename, return_timestamps="char") @slow @require_torch @require_torchaudio def test_simple_whisper_translation(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-large", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large") speech_recognizer_2 = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor ) output_2 = speech_recognizer_2(filename) self.assertEqual(output, output_2) # either use generate_kwargs or set the model's generation_config # model.generation_config.task = "transcribe" # model.generation_config.lang = "<|it|>" speech_translator = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, generate_kwargs={"task": "transcribe", "language": "<|it|>"}, ) output_3 = speech_translator(filename) self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."}) @slow @require_torch def test_whisper_language(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] # 1. English-only model compatible with no language argument output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) # 2. English-only Whisper does not accept the language argument with self.assertRaisesRegex( ValueError, "Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, " "pass `is_multilingual=True` to generate, or update the generation config.", ): _ = speech_recognizer(filename, generate_kwargs={"language": "en"}) # 3. Multilingual model accepts language argument speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) output = speech_recognizer(filename, generate_kwargs={"language": "en"}) self.assertEqual( output, {"text": " Mr. 
Quilter is the apostle of the middle classes and we are glad to welcome his gospel."}, ) @slow @require_torch @require_torchaudio def test_xls_r_to_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-21-to-en", feature_extractor="facebook/wav2vec2-xls-r-1b-21-to-en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A man said to the universe: “Sir, I exist."}) @slow @require_torch @require_torchaudio def test_xls_r_from_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-en-to-15", feature_extractor="facebook/wav2vec2-xls-r-1b-en-to-15", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "Ein Mann sagte zu dem Universum, Sir, ich bin da."}) @slow @require_torch @require_torchaudio def test_speech_to_text_leveraged(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-2-bart-base", feature_extractor="patrickvonplaten/wav2vec2-2-bart-base", tokenizer=AutoTokenizer.from_pretrained("patrickvonplaten/wav2vec2-2-bart-base"), framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "a man said to the universe sir i exist"}) @slow @require_torch_accelerator def test_wav2vec2_conformer_float16(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-conformer-rope-large-960h-ft", device=torch_device, torch_dtype=torch.float16, framework="pt", ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] output = speech_recognizer(sample) self.assertEqual( output, {"text": "MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"}, ) @require_torch def test_chunking_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "ZBT ZC") @require_torch def test_return_timestamps_ctc_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") # Take short audio to keep the test readable audio = ds[40]["audio"]["array"][:800] output = speech_recognizer(audio, return_timestamps="char") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": " ", "timestamp": (0.0, 0.012)}, {"text": "Z", "timestamp": (0.012, 0.016)}, {"text": "B", "timestamp": (0.016, 0.02)}, {"text": "T", "timestamp": (0.02, 0.024)}, {"text": " ", "timestamp": (0.024, 0.028)}, {"text": "Z", "timestamp": (0.028, 0.032)}, {"text": "X", 
"timestamp": (0.032, 0.036)}, {"text": " ", "timestamp": (0.036, 0.04)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": "ZBT", "timestamp": (0.012, 0.024)}, {"text": "ZX", "timestamp": (0.028, 0.036)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) @require_torch @require_pyctcdecode def test_chunking_fast_with_lm(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) # Batch_size = 1 output1 = speech_recognizer([audio_tiled], batch_size=1) self.assertEqual(output1, [{"text": ANY(str)}]) self.assertEqual(output1[0]["text"][:6], "<s> <s") # batch_size = 2 output2 = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output2, [{"text": ANY(str)}]) self.assertEqual(output2[0]["text"][:6], "<s> <s") # TODO There is an offby one error because of the ratio. # Maybe logits get affected by the padding on this random # model is more likely. Add some masking ? # self.assertEqual(output1, output2) @require_torch @require_pyctcdecode def test_with_lm_fast(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") # Making sure the argument are passed to the decoder # Since no change happens in the result, check the error comes from # the `decode_beams` function. with self.assertRaises(TypeError) as e: output = speech_recognizer([audio_tiled], decoder_kwargs={"num_beams": 2}) self.assertContains(e.msg, "TypeError: decode_beams() got an unexpected keyword argument 'num_beams'") output = speech_recognizer([audio_tiled], decoder_kwargs={"beam_width": 2}) @require_torch @require_pyctcdecode def test_with_local_lm_fast(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") speech_recognizer = pipeline( task="automatic-speech-recognition", model=local_dir, ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") @require_torch @slow def test_whisper_longform(self): # fmt: off EXPECTED_RESULT = """ Folks, if you watch the show, you know, I spent a lot of time right over there. 
Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out of fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct denny's, set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!""" # fmt: on processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to("cuda") pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, device="cuda:0", ) ds = load_dataset("distil-whisper/meanwhile", "default")["test"] ds = ds.cast_column("audio", Audio(sampling_rate=16000)) audio = ds[:1]["audio"] result = pipe(audio)[0]["text"] assert result == EXPECTED_RESULT @require_torch @slow def test_chunking_and_timestamps(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") speech_recognizer = pipeline( task="automatic-speech-recognition", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, framework="pt", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ("A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats).strip()}]) output = speech_recognizer(audio, return_timestamps="char") self.assertEqual(audio.shape, (74_400,)) self.assertEqual(speech_recognizer.feature_extractor.sampling_rate, 16_000) # The audio is 74_400 / 16_000 = 4.65s long. 
self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": " ", "timestamp": (0.62, 0.66)}, {"text": "M", "timestamp": (0.68, 0.7)}, {"text": "A", "timestamp": (0.78, 0.8)}, {"text": "N", "timestamp": (0.84, 0.86)}, {"text": " ", "timestamp": (0.92, 0.98)}, {"text": "S", "timestamp": (1.06, 1.08)}, {"text": "A", "timestamp": (1.14, 1.16)}, {"text": "I", "timestamp": (1.16, 1.18)}, {"text": "D", "timestamp": (1.2, 1.24)}, {"text": " ", "timestamp": (1.24, 1.28)}, {"text": "T", "timestamp": (1.28, 1.32)}, {"text": "O", "timestamp": (1.34, 1.36)}, {"text": " ", "timestamp": (1.38, 1.42)}, {"text": "T", "timestamp": (1.42, 1.44)}, {"text": "H", "timestamp": (1.44, 1.46)}, {"text": "E", "timestamp": (1.46, 1.5)}, {"text": " ", "timestamp": (1.5, 1.56)}, {"text": "U", "timestamp": (1.58, 1.62)}, {"text": "N", "timestamp": (1.64, 1.68)}, {"text": "I", "timestamp": (1.7, 1.72)}, {"text": "V", "timestamp": (1.76, 1.78)}, {"text": "E", "timestamp": (1.84, 1.86)}, {"text": "R", "timestamp": (1.86, 1.9)}, {"text": "S", "timestamp": (1.96, 1.98)}, {"text": "E", "timestamp": (1.98, 2.02)}, {"text": " ", "timestamp": (2.02, 2.06)}, {"text": "S", "timestamp": (2.82, 2.86)}, {"text": "I", "timestamp": (2.94, 2.96)}, {"text": "R", "timestamp": (2.98, 3.02)}, {"text": " ", "timestamp": (3.06, 3.12)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": " ", "timestamp": (3.58, 3.6)}, {"text": "E", "timestamp": (3.66, 3.68)}, {"text": "X", "timestamp": (3.68, 3.7)}, {"text": "I", "timestamp": (3.9, 3.92)}, {"text": "S", "timestamp": (3.94, 3.96)}, {"text": "T", "timestamp": (4.0, 4.02)}, {"text": " ", "timestamp": (4.06, 4.1)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.28, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.5)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, {"text": "SIR", "timestamp": (2.82, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word", chunk_length_s=2.0) self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.3, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.48)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, # Tiny change linked to chunking. {"text": "SIR", "timestamp": (2.84, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) # CTC models must specify return_timestamps type - cannot set `return_timestamps=True` blindly with self.assertRaisesRegex( ValueError, "^CTC can either predict character level timestamps, or word level timestamps. 
" "Set `return_timestamps='char'` or `return_timestamps='word'` as required.$", ): _ = speech_recognizer(audio, return_timestamps=True) @require_torch @slow def test_chunking_with_lm(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-base-100h-with-lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio = np.tile(audio, n_repeats) output = speech_recognizer([audio], batch_size=2) expected_text = "A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats expected = [{"text": expected_text.strip()}] self.assertEqual(output, expected) @require_torch def test_chunk_iterator(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() ratio = 1 outs = list(chunk_iter(inputs, feature_extractor, 100, 0, 0, ratio)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) # two chunks no stride outs = list(chunk_iter(inputs, feature_extractor, 50, 0, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(50, 0, 0), (50, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 50), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # two chunks incomplete last outs = list(chunk_iter(inputs, feature_extractor, 80, 0, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 0), (20, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # one chunk since first is also last, because it contains only data # in the right strided part we just mark that part as non stride # This test is specifically crafted to trigger a bug if next chunk # would be ignored by the fact that all the data would be # contained in the strided left data. 
outs = list(chunk_iter(inputs, feature_extractor, 105, 5, 5, ratio)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) @require_torch def test_chunk_iterator_stride(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] ratio = 1 outs = list(chunk_iter(inputs, feature_extractor, 100, 20, 10, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(100, 0, 10), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100), (1, 30)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 80, 20, 10, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 10), (50, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 90, 20, 0, ratio)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(90, 0, 0), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 90), (1, 30)]) outs = list(chunk_iter(inputs, feature_extractor, 36, 6, 6, ratio)) self.assertEqual(len(outs), 4) self.assertEqual([o["stride"] for o in outs], [(36, 0, 6), (36, 6, 6), (36, 6, 6), (28, 6, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 36), (1, 36), (1, 36), (1, 28)]) inputs = torch.LongTensor([i % 2 for i in range(100)]) input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] outs = list(chunk_iter(inputs, feature_extractor, 30, 5, 5, ratio)) self.assertEqual(len(outs), 5) self.assertEqual([o["stride"] for o in outs], [(30, 0, 5), (30, 5, 5), (30, 5, 5), (30, 5, 5), (20, 5, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 30), (1, 30), (1, 30), (1, 30), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, False, False, False, True]) # (0, 25) self.assertEqual(nested_simplify(input_values[:, :30]), nested_simplify(outs[0]["input_values"])) # (25, 45) self.assertEqual(nested_simplify(input_values[:, 20:50]), nested_simplify(outs[1]["input_values"])) # (45, 65) self.assertEqual(nested_simplify(input_values[:, 40:70]), nested_simplify(outs[2]["input_values"])) # (65, 85) self.assertEqual(nested_simplify(input_values[:, 60:90]), nested_simplify(outs[3]["input_values"])) # (85, 100) self.assertEqual(nested_simplify(input_values[:, 80:100]), nested_simplify(outs[4]["input_values"])) @require_torch def test_stride(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 10) output = speech_recognizer({"raw": waveform, "stride": (0, 0), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB XB B EB BB B EB B OB X"}) # 0 effective ids Just take the middle one output = speech_recognizer({"raw": waveform, "stride": (5000, 5000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": ""}) # Only 1 arange. 
output = speech_recognizer({"raw": waveform, "stride": (0, 9000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB"}) # 2nd arange output = speech_recognizer({"raw": waveform, "stride": (1000, 8000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "XB"}) @slow @require_torch_accelerator def test_slow_unfinished_sequence(self): from transformers import GenerationConfig pipe = pipeline( "automatic-speech-recognition", model="vasista22/whisper-hindi-large-v2", device=torch_device, ) # Original model wasn't trained with timestamps and has incorrect generation config pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2") audio = hf_hub_download("Narsil/asr_dummy", filename="hindi.ogg", repo_type="dataset") out = pipe( audio, return_timestamps=True, ) self.assertEqual( out, { "chunks": [ {"text": "", "timestamp": (18.94, 0.02)}, {"text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", "timestamp": (None, None)}, ], "text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", }, ) def require_ffmpeg(test_case): """ Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. """ import subprocess try: subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip("test requires ffmpeg")(test_case) def bytes_iter(chunk_size, chunks): for i in range(chunks): yield bytes(range(i * chunk_size, (i + 1) * chunk_size)) @require_ffmpeg class AudioUtilsTest(unittest.TestCase): def test_chunk_bytes_iter_too_big(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 10, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0)}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(1, 1))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 1)}) self.assertEqual(next(iter_), {"raw": b"\x01\x02\x03", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x02\x03\x04", "stride": (1, 1)}) # This is finished, but the chunk_bytes doesn't know it yet. 
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x04\x05", "stride": (1, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride_stream(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05\x06\x07", "stride": (1, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x06\x07\x08", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 10, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0), "partial": True}) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": True} ) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": False} ) with self.assertRaises(StopIteration): next(iter_)
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class ZeroShotAudioClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLAP would be there for now. # model_mapping = {CLAPConfig: CLAPModel} @require_torch def test_small_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" ) dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], ) @unittest.skip("No models are available in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): audio_classifier = pipeline( task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", ) # This is an audio of a dog dataset = load_dataset("ashraq/esc50") audio = dataset["train"]["audio"][-1]["array"] output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ) output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) output = audio_classifier( [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) @unittest.skip("No models are available in TF") def test_large_model_tf(self): pass
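# Outside the test harness the same pipeline is driven directly; a minimal
# sketch (the .wav path is a placeholder, and decoding file inputs relies on
# ffmpeg being installed, as elsewhere in the audio pipelines).
if __name__ == "__main__":
    from transformers import pipeline

    audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    # candidate_labels are free-form strings; the result is a list of
    # {"score", "label"} dicts with the best match first, as asserted above.
    print(audio_classifier("some_audio.wav", candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"]))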
hf_public_repos/transformers/tests/pipelines/test_pipelines_visual_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline(self, model, tokenizer, processor): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_tf @unittest.skip("Visual question answering not implemented in TF") def test_small_model_tf(self): pass
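# For reference, the plain usage pattern these tests exercise, as a minimal
# sketch reusing the COCO fixture shipped with the test suite.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    # The pipeline accepts keyword arguments or a {"image": ..., "question": ...}
    # dict; top_k bounds the number of {"score", "answer"} candidates returned.
    print(vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png", question="How many cats are there?", top_k=2))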
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0], threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 
0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 
55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
hf_public_repos/transformers/tests/pipelines/test_pipelines_zero_shot.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class ZeroShotClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): classifier = ZeroShotClassificationPipeline( model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def run_pipeline_test(self, classifier, _): outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics") self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # No kwarg outputs = classifier("Who are you voting for in 2020?", ["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health") self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"]) self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" ) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # https://github.com/huggingface/transformers/issues/13846 outputs = classifier(["I am happy"], ["positive", "negative"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(1) ], ) outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"]) 
self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(2) ], ) with self.assertRaises(ValueError): classifier("", candidate_labels="politics") with self.assertRaises(TypeError): classifier(None, candidate_labels="politics") with self.assertRaises(ValueError): classifier("Who are you voting for in 2020?", candidate_labels="") with self.assertRaises(TypeError): classifier("Who are you voting for in 2020?", candidate_labels=None) with self.assertRaises(ValueError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", ) with self.assertRaises(AttributeError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, ) self.run_entailment_id(classifier) def run_entailment_id(self, zero_shot_classifier: Pipeline): config = zero_shot_classifier.model.config original_label2id = config.label2id original_entailment = zero_shot_classifier.entailment_id config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id, -1) config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id, 2) zero_shot_classifier.model.config.label2id = original_label2id self.assertEqual(original_entailment, zero_shot_classifier.entailment_id) @require_torch def test_truncation(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" 
* 100, candidate_labels=["politics", "public health", "science"] ) @require_torch def test_small_model_pt(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @require_tf def test_small_model_tf(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @slow @require_torch def test_large_model_pt(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, ) @slow @require_tf def test_large_model_tf(self): zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf") outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, )
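# A runnable note on the multi_label flag used above: with the default
# multi_label=False the entailment logits are softmaxed across labels, so the
# scores sum to 1; with multi_label=True each label is scored independently
# (entailment vs contradiction), so they need not (cf. 0.817 + 0.713 above).
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    text = "Who are you voting for in 2020?"
    single = classifier(text, candidate_labels=["politics", "economics"])
    multi = classifier(text, candidate_labels=["politics", "economics"], multi_label=True)
    print(sum(single["scores"]))  # ~1.0
    print(sum(multi["scores"]))  # unconstrained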
hf_public_repos/transformers/tests/pipelines/test_pipelines_feature_extraction.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, is_tf_available, is_torch_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, 
-0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 
0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_torch def test_tokenization_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, 
-1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_tf def test_tokenization_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 
0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_shape(self, input_, shape=None): if shape is None: shape = [] if isinstance(input_, list): subshapes = [self.get_shape(in_, shape) for in_ in input_] if all(s == 0 for s in subshapes): shape.append(len(input_)) else: subshape = subshapes[0] shape = 
[len(input_), *subshape] elif isinstance(input_, float): return 0 else: raise ValueError("We expect lists of floats, nothing else") return shape def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None: self.skipTest("No tokenizer") return elif ( type(model.config) in FEATURE_EXTRACTOR_MAPPING or isinstance(model.config, LxmertConfig) or type(model.config) in IMAGE_PROCESSOR_MAPPING ): self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.") return elif model.config.is_encoder_decoder: self.skipTest( """encoder_decoder models are trickier for this pipeline. Do we want encoder + decoder inputs to get some features? Do we want encoder-only features? For now, ignore those. """ ) return feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor) return feature_extractor, ["This is a test", "This is another test"] def run_pipeline_test(self, feature_extractor, examples): outputs = feature_extractor("This is a test") shape = self.get_shape(outputs) self.assertEqual(shape[0], 1) # If we send too small an input, there's a bug within FunnelModel (output with shape [1, 4, 2, 1] doesn't match the broadcast shape [1, 4, 2, 2]) outputs = feature_extractor(["This is a test", "Another longer test"]) shape = self.get_shape(outputs) self.assertEqual(shape[0], 2) outputs = feature_extractor("This is a test" * 100, truncation=True) shape = self.get_shape(outputs) self.assertEqual(shape[0], 1)
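# The pipeline returns one vector per token ([batch, seq_len, hidden]); a
# common follow-up, sketched here (not something the pipeline does itself), is
# mean-pooling the token vectors into a single sentence embedding.
if __name__ == "__main__":
    import numpy as np
    from transformers import pipeline

    extractor = pipeline(task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt")
    features = np.array(extractor("This is a test"))  # (1, seq_len, 32) for this tiny model
    sentence_vector = features.mean(axis=1)  # (1, 32)
    print(sentence_vector.shape)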
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_or_tf, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def test_small_model_pt(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], ) outputs = text_generator(["This is a test", "This is a second test"]) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. 
FiliFili@@" ) } ], ], ) outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ) text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id text_generator.tokenizer.pad_token = "<pad>" outputs = text_generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ], ) @require_tf def test_small_model_tf(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], ) outputs = text_generator(["This is a test", "This is a second test"], do_sample=False) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ], ) def get_test_pipeline(self, model, tokenizer, processor): text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer) return text_generator, ["This is a test", "Another test"] def test_stop_sequence_stopping_criteria(self): prompt = """Hello I believe in""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") output = text_generator(prompt) self.assertEqual( output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], ) output = text_generator(prompt, stop_sequence=" fe") self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}]) def run_pipeline_test(self, text_generator, _): model = text_generator.model tokenizer = text_generator.tokenizer outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator("This is a test", return_full_text=False) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False) outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) outputs = text_generator("This is a test", return_full_text=True) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) if text_generator.tokenizer.pad_token is not None: outputs = text_generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) 
self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): outputs = text_generator("test", return_full_text=True, return_text=True) with self.assertRaises(ValueError): outputs = text_generator("test", return_full_text=True, return_tensors=True) with self.assertRaises(ValueError): outputs = text_generator("test", return_text=True, return_tensors=True) # Empty prompt is slightly special # it requires the BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): outputs = text_generator("") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) else: with self.assertRaises((ValueError, AssertionError)): outputs = text_generator("") if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [ "RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM", "FuyuForCausalLM", ] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)): text_generator("This is a test" * 500, max_new_tokens=20) outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20) # Hole strategy cannot work when max_new_tokens exceeds model_max_length with self.assertRaises(ValueError): text_generator( "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, ) @require_torch @require_accelerate @require_torch_gpu def test_small_model_pt_bloom_accelerate(self): import torch # Classic `model_kwargs` pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, ) self.assertEqual(pipe.model.device, torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.) 
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16) self.assertEqual(pipe.model.device, torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto") self.assertEqual(pipe.model.device, torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) @require_torch @require_torch_accelerator def test_small_model_fp16(self): import torch pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", device=torch_device, torch_dtype=torch.float16, ) pipe("This is a test") @require_torch @require_accelerate @require_torch_accelerator def test_pipeline_accelerate_top_p(self): import torch pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16) pipe("This is a test", do_sample=True, top_p=0.5) def test_pipeline_length_setting_warning(self): prompt = """Hello world""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") if text_generator.model.framework == "tf": logger = logging.get_logger("transformers.generation.tf_utils") else: logger = logging.get_logger("transformers.generation.utils") logger_msg = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10, max_new_tokens=1) self.assertIn(logger_msg, cl.out) # The user only sets one -> no warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_new_tokens=1) self.assertNotIn(logger_msg, cl.out) with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10) self.assertNotIn(logger_msg, cl.out)
hf_public_repos/transformers/tests/pipelines/test_pipelines_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = ObjectDetectionPipeline(model=model, image_processor=processor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_to_image.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
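# A minimal sketch of the image-to-image (super-resolution) pipeline tested
# above, using the same Swin2SR checkpoint; the exact output size depends on
# the input image.
from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
upscaled = upscaler("http://images.cocodataset.org/val2017/000000039769.jpg")
print(upscaled.size)  # roughly 2x the input resolution, e.g. (1296, 976)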
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) # Legacy behavior outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = 
text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) # Forcing to get all results with `top_k=None` # This is NOT the legacy format outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
hf_public_repos/transformers/tests/pipelines/test_pipelines_conversational.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer, Conversation, ConversationalPipeline, TFAutoModelForCausalLM, pipeline, ) from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, require_tf, require_torch, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class ConversationalPipelineTests(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) model_mapping = dict( list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(MODEL_FOR_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_CAUSAL_LM_MAPPING else [] ) tf_model_mapping = dict( list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_CAUSAL_LM_MAPPING else [] ) def get_test_pipeline(self, model, tokenizer, processor): conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) return conversation_agent, [Conversation("Hi there!")] def run_pipeline_test(self, conversation_agent, _): # Simple outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Single list outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Batch conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") self.assertEqual(len(conversation_1), 1) self.assertEqual(len(conversation_2), 1) outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5) self.assertEqual(outputs, [conversation_1, conversation_2]) self.assertEqual( outputs, [ Conversation( [ {"role": "user", "content": "Going to the movies tonight - any suggestions?"}, {"role": "assistant", "content": ANY(str)}, ], ), Conversation( [ {"role": "user", "content": "What's the last book you have read?"}, {"role": "assistant", "content": ANY(str)}, ] ), ], ) # One conversation with history conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"}) outputs = conversation_agent(conversation_2, max_new_tokens=5) self.assertEqual(outputs, conversation_2) self.assertEqual( outputs, Conversation( [ {"role": "user", "content": "What's the last book 
you have read?"}, {"role": "assistant", "content": ANY(str)}, {"role": "user", "content": "Why do you recommend it?"}, {"role": "assistant", "content": ANY(str)}, ] ), ) @require_torch @slow def test_integration_torch_conversation(self): # When conversation_agent = pipeline(task="conversational", device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result[0].generated_responses[0], "The Big Lebowski") self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?") self.assertEqual(result[1].generated_responses[0], "The Last Question") # When conversation_2.add_user_input("Why do you recommend it?") result = conversation_agent(conversation_2, do_sample=False, max_length=1000) # Then self.assertEqual(result, conversation_2) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Why do you recommend it?") self.assertEqual(result.generated_responses[1], "It's a good book.") @require_torch @slow def test_integration_torch_conversation_truncated_history(self): # When conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) # When result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 1) self.assertEqual(len(result.generated_responses), 1) self.assertEqual(result.past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result.generated_responses[0], "The Big Lebowski") # When conversation_1.add_user_input("Is it an action movie?") result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Is it an action movie?") self.assertEqual(result.generated_responses[1], "It's a comedy.") @require_torch def test_small_model_pt(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_tf def test_small_model_tf(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent 
= ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_torch @slow def test_integration_torch_conversation_dialogpt_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[31373, 50256]]) conversation_2 = Conversation("how are you ?", past_user_inputs=["hello"], generated_responses=["Hi there!"]) inputs = conversation_agent.preprocess(conversation_2) self.assertEqual( inputs["input_ids"].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]] ) @unittest.skip("Model is currently gated") @require_torch @slow def test_integration_torch_conversation_llama2_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_default_system_prompt=True) conversation = Conversation( "What is so great about #1?", past_user_inputs=["I am going to Paris, what should I see?"], generated_responses=[ """\ Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris: 1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city. 2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa. 3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows. These are just a few of the many attractions that Paris has to offer. 
With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""" ], ) inputs = tokenizer._build_conversation_input_ids(conversation) EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] # fmt: skip self.assertEqual(inputs, EXPECTED_INPUTS_IDS) model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) EXPECTED_TEXT = "what topic you want to focus on and create content around it. This will help you stand out from other creators and attract a specific audience.\n\nStep 2: Set Up Your Channel\nCreate your YouTube account and customize your channel with your branding and logo. Make sure your channel name and profile picture are consistent with your niche.\n\nStep 3: Plan Your Content\nDevelop a content strategy that includes the type of content you want to create, how often you will post, and when you will post. Consider creating a content calendar to help you stay organized.\n\nStep 4: Invest in Quality Equipment\nInvest in good quality camera and microphone equipment to ensure your videos look and sound professional. 
You don't need to break the bank, but investing in good equipment will make a big difference in the quality of your videos.\n\nStep 5: Optimize Your Videos for Search\nUse keywords in your video titles, descriptions, and tags to help people find your videos when they search for topics related to your niche" conversation = Conversation( "<<SYS>>\n Only answer with emojis, and charades\n<</SYS>>\n\nHow can I build a house in 10 steps?" ) result = conversation_agent(conversation) self.assertEqual(result.generated_responses[-1], EXPECTED_TEXT) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) # test1 conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[1710, 86, 2]]) # test2 conversation_1 = Conversation( "I like lasagne.", past_user_inputs=["hello"], generated_responses=[ " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie." ], ) inputs = conversation_agent.preprocess(conversation_1) self.assertEqual( inputs["input_ids"].tolist(), [ # This should be compared with the same conversation on ParlAI `safe_interactive` demo. [ 1710, # hello 86, 228, # Double space 228, 946, 304, 398, 6881, 558, 964, 38, 452, 315, 265, 6252, 452, 322, 968, 6884, 3146, 278, 306, 265, 617, 87, 388, 75, 341, 286, 521, 21, 228, # Double space 228, 281, # I like lasagne. 398, 6881, 558, 964, 21, 2, # EOS ], ], ) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") result = conversation_agent( conversation_1, ) self.assertEqual( result.generated_responses[0], # ParlAI implementation output, we have a different one, but it's our # second best, you can check by using num_return_sequences=10 # " Hello! How are you? I'm just getting ready to go to work, how about you?", " Hello! How are you doing today? I just got back from a walk with my dog.", ) conversation_1 = Conversation("Lasagne hello") result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3) self.assertEqual( result.generated_responses[0], " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.", ) conversation_1 = Conversation( "Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne." ) result = conversation_agent( conversation_1, encoder_no_repeat_ngram_size=3, ) self.assertEqual( result.generated_responses[0], " Me too. 
I like how it can be topped with vegetables, meats, and condiments.", ) @require_torch @slow def test_integration_torch_conversation_encoder_decoder(self): # When tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device) conversation_1 = Conversation("My name is Sarah and I live in London") conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "My name is Sarah and I live in London") self.assertEqual( result[0].generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result[1].past_user_inputs[0], "Going to the movies tonight, What movie would you recommend? " ) self.assertEqual( result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?" ) # When conversation_1.add_user_input("Not yet, what about you?") conversation_2.add_user_input("What's your name?") result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 2) self.assertEqual(len(result[1].past_user_inputs), 2) self.assertEqual(len(result[0].generated_responses), 2) self.assertEqual(len(result[1].generated_responses), 2) self.assertEqual(result[0].past_user_inputs[1], "Not yet, what about you?") self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.") self.assertEqual(result[1].past_user_inputs[1], "What's your name?") self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.") @require_torch @slow def test_from_pipeline_conversation(self): model_id = "facebook/blenderbot_small-90M" # from model id conversation_agent_from_model_id = pipeline("conversational", model=model_id, tokenizer=model_id) # from model object model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id) tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id) conversation_agent_from_model = pipeline("conversational", model=model, tokenizer=tokenizer) conversation = Conversation("My name is Sarah and I live in London") conversation_copy = Conversation("My name is Sarah and I live in London") result_model_id = conversation_agent_from_model_id([conversation]) result_model = conversation_agent_from_model([conversation_copy]) # check for equality self.assertEqual( result_model_id.generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result_model_id.generated_responses[0], result_model.generated_responses[0], )
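# A minimal sketch of the Conversation / ConversationalPipeline API exercised
# above; DialoGPT-small matches the small-model tests. Note that later
# transformers releases deprecate this pipeline in favor of chat templates.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Multi-turn: append the next user message and run the pipeline again.
conversation.add_message({"role": "user", "content": "Why do you recommend it?"})
conversation = chatbot(conversation)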
hf_public_repos/transformers/tests/pipelines/test_pipelines_fill_mask.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class FillMaskPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ], ) @require_torch def test_small_model_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 
3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ], ) outputs = unmasker("My name is <mask> <mask>", top_k=2) self.assertEqual( nested_simplify(outputs, decimals=6), [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ], ) @require_torch_accelerator def test_fp16_casting(self): pipe = pipeline( "fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=torch_device, framework="pt", ) # convert model to fp16 pipe.model.half() response = pipe("Paris is the [MASK] of France.") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(response, list) @slow @require_torch def test_large_model_pt(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt") self.run_large_test(unmasker) @slow @require_tf def test_large_model_tf(self): unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf") self.run_large_test(unmasker) def run_large_test(self, unmasker): outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs), [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ], ) outputs = unmasker( "My name is <mask>" + "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100, tokenizer_kwargs={"truncation": True}, ) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.2e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) @require_torch def test_model_no_pad_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) @require_tf def test_model_no_pad_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) def get_test_pipeline(self, model, tokenizer, processor): 
if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) examples = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def run_pipeline_test(self, fill_masker, examples): tokenizer = fill_masker.tokenizer model = fill_masker.model outputs = fill_masker( f"This is a {tokenizer.mask_token}", ) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], ) with self.assertRaises(ValueError): fill_masker([None]) # No mask_token is not supported with self.assertRaises(PipelineException): fill_masker("This is") self.run_test_top_k(model, tokenizer) self.run_test_targets(model, tokenizer) self.run_test_top_k_targets(model, tokenizer) self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer) self.fill_mask_with_multiple_masks(model, tokenizer) def run_test_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:2] # Pipeline argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] 
self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Call argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Score equivalence outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) tokens = [top_mask["token_str"] for top_mask in outputs] scores = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(tokens) == set(targets): unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens) target_scores = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(scores), nested_simplify(target_scores)) # Raises with invalid with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""]) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="") def run_test_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2) self.assertEqual( outputs2, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def run_test_top_k_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) # top_k=2, ntargets=3 targets = sorted(vocab.keys())[:3] outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets) # If we use the most probably targets, and filter differently, we should still # have the same results targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(targets2).issubset(targets): outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2) # They should yield exactly the same result self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) vocab = tokenizer.get_vocab() # String duplicates + id duplicates targets = sorted(vocab.keys())[:3] targets = [targets[0], targets[1], targets[0], targets[2], targets[1]] outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(outputs), 3) def fill_mask_with_multiple_masks(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2 ) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], )
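# A minimal sketch of the fill-mask pipeline covered above. The mask token is
# tokenizer-specific (<mask> for RoBERTa-style models, [MASK] for BERT-style).
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
for prediction in unmasker("The largest city in France is <mask>."):
    print(prediction["token_str"], round(prediction["score"], 3))

# `targets` restricts scoring to candidate tokens, as in run_test_targets above.
unmasker("My name is <mask>", targets=[" Patrick", " Clara"])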
hf_public_repos/transformers/tests/pipelines/test_pipelines_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline # noqa E402 logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor from their names. 
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): # Wrong framework get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) # When using multiple workers on streamable data it should still work # This will force using `num_workers=1` with a warning for now. 
results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) # Used to throw an error because `hidden_states` are a tuple of tensors # instead of the expected tensor. outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 
1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, 
"is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) # is_false Across batch dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): # To avoid regressing, pipeline used to accept device=-1 classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): import tensorflow as tf from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() def check_default_pipeline(self, task, framework, 
set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] # test to compare pipeline to manually loading the respective model model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: # task has no default logger.debug(f"{task} in {framework} has no default") return # by default use first class auto_model_cls = relevant_auto_classes[0] # retrieve correct model ids if task == "translation": # special case for translation pipeline which has multiple languages model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: # normal case - non-translation pipeline model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] # check for equality for model_id, revision, task in zip(model_ids, revisions, tasks): # load default model try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: # first auto class is possible not compatible with model, go to next model class auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) # load default pipeline set_seed_fn() default_pipeline = pipeline(task, framework=framework) # compare pipeline model with default model models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" # Get the original task, so we can restore it at the end. 
# (otherwise the subsequent tests in `TextClassificationPipelineTests` will fail) _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: # restore PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) # Fails if the user forgets to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): # Make sure we have cached the pipeline. 
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") # For some reason scoping doesn't work if not using `self.` self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-pipeline", token=self._token) repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-pipeline", token=self._token) vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) repo.push_to_hub() # Fails if the user forgets to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
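# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original test file):
# the custom-pipeline registration pattern that CustomPipelineTest and
# DynamicPipelineTester exercise above, condensed into one helper. The task
# name and tiny checkpoint mirror those tests; names used here are the ones
# already imported at the top of this module.
# ---------------------------------------------------------------------------
def _example_register_custom_pipeline():
    PIPELINE_REGISTRY.register_pipeline(
        "pair-classification",
        pipeline_class=PairClassificationPipeline,
        pt_model=AutoModelForSequenceClassification,
    )
    # Once registered, the custom task behaves like any built-in pipeline task.
    classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
    return classifier("I hate you", second_text="I love you")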
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_image_segmentation.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from typing import Dict import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): # Instance segmentation (maskformer, and detr) have a slot for null class # and can output nothing even with a low threshold self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) # XXX: PIL.Image implements __eq__ which bypasses ANY, so we inverse the comparison # to make it work self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test") # RGBA outputs = image_segmenter(dataset[0]["file"], threshold=0.0, mask_threshold=0, 
overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # LA outputs = image_segmenter(dataset[1]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # L outputs = image_segmenter(dataset[2]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): # We need to test batch_size with images with the same size. # Detr doesn't normalize the size of the images, meaning we can have # 800x800 or 800x1200, meaning we cannot batch simply. # We simply bail on this batch_size = 1 else: batch_size = 2 # 5 times the same image so the output shape is predictable batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" # The default task is `image-classification` we need to override pipe = pipeline(task="image-segmentation", model=model_id) # This model does NOT support neither `instance` nor `panoptic` # We should error out with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), "Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( 
"http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) # This is extremely brittle, and those values are made specific for the CI. self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) # This must be surprising to the reader. # The `panoptic` returns only LABEL_215, and this returns 3 labels. # output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] # page links (to visualize) expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] # actual links to get files expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] # Convert masks to numpy array output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) # With un-trained tiny random models, the output `logits` tensor is very likely to contain many values # close to each other, which cause `argmax` to give quite different results when running the test on 2 # environments. We use a lower threshold `0.9` here to avoid flakiness. 
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: # shortening by hashing o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) # Shortening by hashing for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, 
"label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=threshold) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = 
pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=0.99) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, threshold=0.99, subtask="instance") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, subtask="semantic") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
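# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original test file):
# the basic image-segmentation pipeline call the tests above build on. The
# checkpoint, image URL and output keys ("score", "label", "mask") come from
# those tests; the threshold value is an arbitrary illustration.
# ---------------------------------------------------------------------------
def _example_image_segmentation_usage():
    segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
    outputs = segmenter(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        threshold=0.9,  # keep only confident segments
    )
    # Each entry holds a float score, a string label and a PIL.Image mask.
    return [(o["label"], o["score"]) for o in outputs]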
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text_to_audio.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING # for now only test text_to_waveform and not text_to_spectrogram @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) # test two examples side-by-side outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { # Using `do_sample=False` to force deterministic output "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) # test two examples side-by-side outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test other generation strategy forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test using a speaker embedding processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def 
test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } # atm, must do to stay coherent with BarkProcessor preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt # history_prompt is a torch.Tensor passed as a forward_param # if generation is successful, it means that it was passed to the right device outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): # use vits - a forward model speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") # for reproducibility set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): # assert error if generate parameter outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): # assert error if generate_kwargs with forward-only models outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def test_generative_model_kwargs(self): # use musicgen - a generative model music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # make sure generate kwargs get priority over forward params forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, 
tokenizer=tokenizer) return speech_generator, ["This is a test", "Another test"] def run_pipeline_test(self, speech_generator, _): outputs = speech_generator("This is a test") self.assertEqual(ANY(np.ndarray), outputs["audio"]) forward_params = ( {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {} ) outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
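# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original test file):
# the minimal text-to-audio call that test_vits_model_pt above checks. The
# checkpoint and the output layout ({"audio": np.ndarray, "sampling_rate": int})
# are taken from that test.
# ---------------------------------------------------------------------------
def _example_text_to_audio_usage():
    speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng")
    out = speech_generator("This is a test")
    # Waveform array plus its sample rate (16 kHz for this checkpoint).
    return out["audio"], out["sampling_rate"]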
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_text2text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, Text2TextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer) return generator, ["Something to write", "Something else"] def run_pipeline_test(self, generator, _): outputs = generator("Something there") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there")) outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) outputs = generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): generator(4) @require_torch def test_small_model_pt(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}]) num_return_sequences = 3 outputs = generator( "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, ) target_outputs = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(outputs, target_outputs) outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ) generator.tokenizer.pad_token_id = generator.model.config.eos_token_id generator.tokenizer.pad_token = "<pad>" outputs = generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], [ {"generated_token_ids": ANY(torch.Tensor)}, 
{"generated_token_ids": ANY(torch.Tensor)}, ], ], ) @require_tf def test_small_model_tf(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}])
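# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original test file):
# the plain text2text-generation call the tests above wrap. The tiny random
# checkpoint is the one used in the tests, so the generated text is not
# meaningful.
# ---------------------------------------------------------------------------
def _example_text2text_usage():
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    # do_sample=False keeps the output deterministic, as in test_small_model_pt above.
    return generator("Something there", do_sample=False)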
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertConfig, QuestionAnsweringPipeline, ) from transformers.data.processors.squad import SquadExample from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, slow, ) from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class QAPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, LxmertConfig): # This is a bimodal model, we need to find a more consistent way # to switch on those models. return None, None question_answerer = QuestionAnsweringPipeline(model, tokenizer) examples = [ {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}, {"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."}, ] return question_answerer, examples def run_pipeline_test(self, question_answerer, _): outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", handle_impossible_answer=True, ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"], context="HuggingFace was founded in Paris.", ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) outputs = question_answerer( question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"], context=[ "HuggingFace is a startup based in New-York", "HuggingFace is a startup founded in Paris", ], ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) with self.assertRaises(ValueError): question_answerer(question="", context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question=None, context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context="") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context=None) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20 ) self.assertEqual( outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)] ) # Very long contexts require multiple features outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20 ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) # Using batching is OK if question_answerer.tokenizer.pad_token_id is None: question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id new_outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2 ) self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs)) @require_torch def test_small_model_pt(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
) self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_iterator(self): # https://github.com/huggingface/transformers/issues/18510 pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt") def data(): for i in range(10): yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."} for outputs in pipe(data()): self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_softmax_trick(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) real_postprocess = question_answerer.postprocess # Tweak start and stop to make sure we encounter the softmax logits # bug. def ensure_large_logits_postprocess( model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): for output in model_outputs: output["start"] = output["start"] * 1e6 output["end"] = output["end"] * 1e6 return real_postprocess( model_outputs, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) question_answerer.postprocess = ensure_large_logits_postprocess outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_small_model_japanese(self): question_answerer = pipeline( "question-answering", model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head", ) output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip # Wrong answer, the whole text is identified as one "word" since the tokenizer does not include # a pretokenizer self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip # Disable word alignment output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip self.assertEqual( nested_simplify(output), {"score": 1.0, "start": 15, "end": 18, "answer": "教科書"}, ) @slow @require_torch def test_small_model_long_context_cls_slow(self): question_answerer = pipeline( "question-answering", model="deepset/roberta-base-squad2", handle_impossible_answer=True, max_seq_length=512, ) outputs = question_answerer( question="What country is Paris the capital of?", context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. 
As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""", ) self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""}) @require_tf def test_small_model_tf(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.011, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_large_model_pt(self): question_answerer = pipeline( "question-answering", ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @slow @require_torch def test_large_model_issue(self): qa_pipeline = pipeline( "question-answering", model="mrm8488/bert-multi-cased-finetuned-xquadv1", ) outputs = qa_pipeline( { "context": ( "Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's" " order from August this year that had remanded him in police custody for a week in a multi-crore" " loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud" " case and some related matters being probed by the CBI and Enforcement Directorate. A single" " bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on" " October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special" " court's order permitting the CBI's request for police custody on August 14 was illegal and in" " breach of the due process of law. Therefore, his police custody and subsequent judicial custody" " in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special" " court's order dated August 14. As per his plea, in August this year, the CBI had moved two" " applications before the special court, one seeking permission to arrest Kapoor, who was already" " in judicial custody at the time in another case, and the other, seeking his police custody." " While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the" " central agency's plea for his custody. Kapoor, however, said in his plea that before filing an" " application for his arrest, the CBI had not followed the process of issuing him a notice under" " Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken" " prior sanction as mandated under section 17 A of the Prevention of Corruption Act for" " prosecuting him. The special court, however, had said in its order at the time that as Kapoor" " was already in judicial custody in another case and was not a free man the procedure mandated" " under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of" " appearance was concerned. ADVERTISING It had also said that case records showed that the" " investigating officer had taken an approval from a managing director of Yes Bank before" " beginning the proceedings against Kapoor and such a permission was a valid sanction. However," " Kapoor in his plea said that the above order was bad in law and sought that it be quashed and" " set aside. The law mandated that if initial action was not in consonance with legal procedures," " then all subsequent actions must be held as illegal, he said, urging the High Court to declare" " the CBI remand and custody and all subsequent proceedings including the further custody as" " illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee" " Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee" " has stated that she is a resident of the United Kingdom and is unable to travel to India owing" " to restrictions imposed due to the COVID-19 pandemic. 
According to the CBI, in the present case," " Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused" " Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was" " not eligible for the same" ), "question": "Is this person invovled in fraud?", } ) self.assertEqual( nested_simplify(outputs), {"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261}, ) @slow @require_torch def test_large_model_course(self): question_answerer = pipeline("question-answering") long_context = """ 🤗 Transformers: State of the Art NLP 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question = "Which deep learning libraries back 🤗 Transformers?" outputs = question_answerer(question=question, context=long_context) self.assertEqual( nested_simplify(outputs), {"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892}, ) @slow @require_tf def test_large_model_tf(self): question_answerer = pipeline("question-answering", framework="tf") outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @require_torch_or_tf class QuestionAnsweringArgumentHandlerTests(unittest.TestCase): def test_argument_handler(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" normalized = qa(Q, C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=[Q, Q], context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa({"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X=[{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(data={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" with self.assertRaises(KeyError): qa({"context": C}) with self.assertRaises(KeyError): qa({"question": Q}) with self.assertRaises(KeyError): qa([{"context": C}]) with self.assertRaises(ValueError): qa(None, C) with self.assertRaises(ValueError): qa("", C) with self.assertRaises(ValueError): qa(Q, None) with self.assertRaises(ValueError): qa(Q, "") with self.assertRaises(ValueError): qa(question=None, context=C) with self.assertRaises(ValueError): qa(question="", context=C) with self.assertRaises(ValueError): qa(question=Q, context=None) with self.assertRaises(ValueError): qa(question=Q, context="") with self.assertRaises(ValueError): qa({"question": None, "context": C}) with self.assertRaises(ValueError): qa({"question": "", "context": C}) with self.assertRaises(ValueError): qa({"question": Q, "context": None}) with self.assertRaises(ValueError): qa({"question": Q, "context": ""}) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": None, "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": "", "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": None}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": ""}]) with self.assertRaises(ValueError): qa(question={"This": "Is weird"}, context="This is a context") with self.assertRaises(ValueError): qa(question=[Q, Q], context=[C, C, C]) with self.assertRaises(ValueError): qa(question=[Q, Q, Q], context=[C, C]) def test_argument_handler_old_format(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" C = "HuggingFace was founded in Paris" # Backward compatibility for this normalized = qa(question=[Q, Q], context=[C, C]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling_odd(self): qa = QuestionAnsweringArgumentHandler() with self.assertRaises(ValueError): qa(None) with self.assertRaises(ValueError): qa(Y=None) with self.assertRaises(ValueError): qa(1)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_document_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class Image: @staticmethod def open(*args, **kwargs): pass def load_image(_): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. INVOICE_URL = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def get_test_pipeline(self, model, tokenizer, processor): dqa_pipeline = pipeline( "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor ) image = INVOICE_URL word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) question = "What is the placebo?" examples = [ { "image": load_image(image), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def run_pipeline_test(self, dqa_pipeline, examples): outputs = dqa_pipeline(examples, top_k=2) self.assertEqual( outputs, [ [ {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, ] ] * 3, ) @require_torch @require_detectron2 @require_pytesseract def test_small_model_pt(self): dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") image = INVOICE_URL question = "How many cats are there?" expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Most likely with an empty answer image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) # We can optionally pass the words and bounding boxes directly image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented # @require_torch # def test_small_model_pt_donut(self): # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" # question = "How many cats are there?" # # outputs = dqa_pipeline(image=image, question=question, top_k=2) # self.assertEqual( # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] # ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt_chunk(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm_chunk(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) @slow @require_torch def test_large_model_pt_donut(self): dqa_pipeline = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip("Document question answering not implemented in TF") def test_small_model_tf(self): pass
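# Illustrative sketch (not exercised by the test suite): several tests above bypass
# OCR by passing precomputed word_boxes. Judging from how apply_tesseract's output is
# zipped there, the expected shape is a list of (word, [x0, y0, x1, y1]) pairs with
# coordinates normalized to a 0-1000 grid. The words and boxes below are hypothetical,
# chosen only to show the structure:
def build_word_boxes(words, boxes):
    # Pair each OCR'd word with its normalized bounding box.
    return list(zip(words, boxes))


example_word_boxes = build_word_boxes(
    ["Invoice", "us-001"],
    [[50, 40, 220, 70], [230, 40, 330, 70]],  # hypothetical normalized coordinates
)
# A LayoutLM-style pipeline can then be queried without an image, as in the
# {"image": None, "word_boxes": ...} calls above:
# dqa_pipeline({"image": None, "word_boxes": example_word_boxes, "question": "What is the invoice number?"})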
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_summarization.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, SummarizationPipeline, TFPreTrainedModel, pipeline, ) from transformers.testing_utils import get_gpu_count, is_pipeline_test, require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer) return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"] def run_pipeline_test(self, summarizer, _): model = summarizer.model outputs = summarizer("(CNN)The Palestinian Authority officially became") self.assertEqual(outputs, [{"summary_text": ANY(str)}]) outputs = summarizer( "(CNN)The Palestinian Authority officially became ", num_beams=2, min_length=2, max_length=5, ) self.assertEqual(outputs, [{"summary_text": ANY(str)}]) # Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences. model_can_handle_longer_seq = [ "SwitchTransformersConfig", "T5Config", "LongT5Config", "LEDConfig", "PegasusXConfig", "FSMTConfig", "M2M100Config", "ProphetNetConfig", # positional embeddings up to a fixed maximum size (otherwise clamping the values) ] if model.config.__class__.__name__ not in model_can_handle_longer_seq: # Too long and exception is expected. # For TF models, if the weights are initialized in GPU context, we won't get expected index error from # the embedding layer. 
if not ( isinstance(model, TFPreTrainedModel) and get_gpu_count() > 0 and len(summarizer.model.trainable_weights) > 0 ): with self.assertRaises(Exception): outputs = summarizer("This " * 1000) outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST) @require_torch def test_small_model_pt(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_tf def test_small_model_tf(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_torch @slow def test_integration_torch_summarization(self): summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." 
In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) expected_cnn_summary = ( " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives" " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States" " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move," " says governments seeking to penalize Palestine should end pressure ." ) result = summarizer(cnn_article) self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
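# Illustrative sketch (not exercised by the test suite): run_pipeline_test above
# expects most encoder-decoder summarizers to raise on inputs longer than their
# positional embeddings unless truncation is requested. A minimal, hedged example of
# the truncating call, reusing the tiny checkpoint and the pipeline/TruncationStrategy
# imports at the top of this file:
def summarize_long(text: str):
    summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart")
    # TruncationStrategy.ONLY_FIRST clips the (single) input sequence to the model's
    # maximum length instead of letting the embedding lookup fail.
    return summarizer(text, truncation=TruncationStrategy.ONLY_FIRST)


# e.g. summarize_long("This " * 1000) should return a summary rather than raising.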
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_translation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang) else: translator = TranslationPipeline(model=model, tokenizer=tokenizer) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class 
TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): # We don't provide a default for this pair with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") # but we do for this one translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = pipeline(task="translation", model=model, tokenizer=tokenizer) # Missing src_lang, tgt_lang with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) # src_lang, tgt_lang can be defined at pipeline call time translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
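# Illustrative sketch (not exercised by the test suite): the tests above rely on the
# "translation_{src}_to_{tgt}" task-name convention, e.g. test_translation_on_odd_language
# expects src "cn" and tgt "ar" to be read off the task string alone. A minimal, hedged
# re-implementation of that parsing for clarity only; the library's own logic may differ:
def parse_translation_task(task: str):
    prefix, _, langs = task.partition("_")
    if prefix != "translation" or "_to_" not in langs:
        raise ValueError(f"not a parameterized translation task: {task!r}")
    src, tgt = langs.split("_to_", 1)
    return src, tgt


assert parse_translation_task("translation_en_to_de") == ("en", "de")
assert parse_translation_task("translation_cn_to_ar") == ("cn", "ar")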
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_token_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoModelForTokenClassification, AutoTokenizer, TokenClassificationPipeline, pipeline, ) from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]] # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TokenClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer) return token_classifier, ["A simple string", "A simple string that is quite a bit longer"] def run_pipeline_test(self, token_classifier, _): model = token_classifier.model tokenizer = token_classifier.tokenizer if not tokenizer.is_fast: return # Slow tokenizers do not return offsets mappings, so this test will fail outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], ) outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"]) self.assertIsInstance(outputs, list) self.assertEqual(len(outputs), 2) n = len(outputs[0]) m = len(outputs[1]) self.assertEqual( nested_simplify(outputs), [ [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(m) ], ], ) self.run_aggregation_strategy(model, tokenizer) def run_aggregation_strategy(self, model, tokenizer): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) 
self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average" ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) with self.assertWarns(UserWarning): token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) with self.assertWarns(UserWarning): token_classifier = pipeline( task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) @slow @require_torch def test_chunking(self): NER_MODEL = "elastic/distilbert-base-uncased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) tokenizer.model_max_length = 10 stride = 5 sentence = ( "Hugging Face, Inc. is a French company that develops tools for building applications using machine learning. " "The company, based in New York City was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf." 
) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="simple", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="first", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="max", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) @require_torch def test_chunking_fast(self): # Note: We cannot run the test on "conflicts" on the chunking. 
# The problem is that the model is random, and thus the results do heavily # depend on the chunking, so we cannot expect "abcd" and "bcd" to find # the same entities. We defer to slow tests for this. pipe = pipeline(model="hf-internal-testing/tiny-bert-for-token-classification") sentence = "The company, based in New York City was founded in 2016 by French entrepreneurs" results = pipe(sentence, aggregation_strategy="first") # This is what this random model gives on the full sentence self.assertEqual( nested_simplify(results), [ # This is 2 actual tokens {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) # This will force the tokenizer to split after "city was". pipe.tokenizer.model_max_length = 12 self.assertEqual( pipe.tokenizer.decode(pipe.tokenizer.encode(sentence, truncation=True)), "[CLS] the company, based in new york city was [SEP]", ) stride = 4 results = pipe(sentence, aggregation_strategy="first", stride=stride) self.assertEqual( nested_simplify(results), [ {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, # This is an extra entity found by this random model, but at least both original # entities are there {"end": 58, "entity_group": "MISC", "score": 0.115, "start": 56, "word": "by"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) @require_torch @slow def test_spanish_bert(self): # https://github.com/huggingface/transformers/pull/4987 NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses.""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1}, {"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2}, {"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4}, {"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo 
Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114}, ], ) @require_torch_accelerator @slow def test_accelerator(self): sentence = "This is dummy sentence" ner = pipeline( "token-classification", device=torch_device, aggregation_strategy=AggregationStrategy.SIMPLE, ) output = ner(sentence) self.assertEqual(nested_simplify(output), []) @require_torch @slow def test_dbmdz_english(self): # Other sentence NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Enzo works at the UN""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1}, {"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2}, {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @require_torch @slow def test_aggregation_strategy_byte_level_tokenizer(self): sentence = "Groenlinks praat over Schiphol." 
ner = pipeline("ner", model="xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"}, {"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31}, ], ) @require_torch def test_aggregation_strategy_no_b_i_prefix(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"} example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)), [ 
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy_example2(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { # Necessary for AVERAGE "scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0, 0]), "is_subword": False, "index": 1, "word": "Ra", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]), "is_subword": True, "word": "##ma", "start": 2, "end": 4, "index": 2, }, { # 4th score will have the higher average # 4th score is B-PER for this model # It's does not correspond to any of the subtokens. "scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0, 0]), "is_subword": True, "word": "##zotti", "start": 11, "end": 13, "index": 3, }, ] self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.NONE), [ {"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1}, {"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2}, {"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3}, ], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.FIRST), [{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.MAX), [{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}], ) @require_torch @slow def test_aggregation_strategy_offsets_with_leading_space(self): sentence = "We're from New York" model_name = "brandon25/deberta-base-finetuned-ner" ner = pipeline("ner", model=model_name, ignore_labels=[], aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"entity_group": "O", "score": 1.0, "word": " We're from", "start": 0, "end": 10}, {"entity_group": "LOC", "score": 1.0, "word": " New York", "start": 10, "end": 19}, ], ) @require_torch def test_gather_pre_entities(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") sentence = "Hello there" tokens = tokenizer( sentence, return_attention_mask=False, return_tensors="pt", truncation=True, 
return_special_tokens_mask=True, return_offsets_mapping=True, ) offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0] special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0] input_ids = tokens["input_ids"].numpy()[0] # First element in [CLS] scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]]) pre_entities = token_classifier.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy=AggregationStrategy.NONE, ) self.assertEqual( nested_simplify(pre_entities), [ {"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1}, { "word": "there", "scores": [0.8, 0.1, 0.1], "index": 2, "start": 6, "end": 11, "is_subword": False, }, ], ) @require_torch def test_word_heuristic_leading_space(self): model_name = "hf-internal-testing/tiny-random-deberta-v2" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") sentence = "I play the theremin" tokens = tokenizer( sentence, return_attention_mask=False, return_tensors="pt", return_special_tokens_mask=True, return_offsets_mapping=True, ) offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0] special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0] input_ids = tokens["input_ids"].numpy()[0] scores = np.array([[1, 0] for _ in input_ids]) # values irrelevant for heuristic pre_entities = token_classifier.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy=AggregationStrategy.FIRST, ) # ensure expected tokenization and correct is_subword values self.assertEqual( [(entity["word"], entity["is_subword"]) for entity in pre_entities], [("▁I", False), ("▁play", False), ("▁the", False), ("▁there", False), ("min", True)], ) @require_tf def test_tf_only(self): model_name = "hf-internal-testing/tiny-random-bert-tf-only" # This model only has a TensorFlow version # We test that if we don't specificy framework='tf', it gets detected automatically token_classifier = pipeline(task="ner", model=model_name) self.assertEqual(token_classifier.framework, "tf") @require_tf def test_small_model_tf(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" token_classifier = pipeline(task="token-classification", model=model_name, framework="tf") outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], ) @require_torch def test_no_offset_tokenizer(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt") outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None}, ], ) @require_torch def test_small_model_pt(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" token_classifier = pipeline(task="token-classification", model=model_name, framework="pt") outputs = token_classifier("This is a test !") self.assertEqual( 
nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], ) token_classifier = pipeline( task="token-classification", model=model_name, framework="pt", ignore_labels=["O", "I-MISC"] ) outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [], ) token_classifier = pipeline(task="token-classification", model=model_name, framework="pt") # Overload offset_mapping outputs = token_classifier( "This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)] ) self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2}, ], ) # Batch size does not affect outputs (attention_mask are required) sentences = ["This is a test !", "Another test this is with longer sentence"] outputs = token_classifier(sentences) outputs_batched = token_classifier(sentences, batch_size=2) # Batching does not make a difference in predictions self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs)) self.assertEqual( nested_simplify(outputs_batched), [ [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], [], ], ) @require_torch def test_pt_ignore_subwords_slow_tokenizer_raises(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST) with self.assertRaises(ValueError): pipeline( task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE ) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX) @slow @require_torch def test_simple(self): token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True) sentence = "Hello Sarah Jessica Parker who Jessica lives in New York" sentence2 = "This is a simple test" output = token_classifier(sentence) output_ = nested_simplify(output) self.assertEqual( output_, [ { "entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26, }, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], ) output = token_classifier([sentence, sentence2]) output_ = nested_simplify(output) self.assertEqual( output_, [ [ {"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26}, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], [], ], ) class TokenClassificationArgumentHandlerTestCase(unittest.TestCase): def setUp(self): self.args_parser = TokenClassificationArgumentHandler() def test_simple(self): string = "This is a simple input" inputs, offset_mapping = self.args_parser(string) self.assertEqual(inputs, [string]) self.assertEqual(offset_mapping, None) inputs, offset_mapping = self.args_parser([string, string]) 
self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, None)

        inputs, offset_mapping = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)])
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]])

        inputs, offset_mapping = self.args_parser(
            [string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]]
        )
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]])

    def test_errors(self):
        string = "This is a simple input"

        # 2 sentences, 1 offset_mapping, args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping, args
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]])

        # 2 sentences, 1 offset_mapping, input_list
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)])

        # 1 sentence, 2 offset_mappings
        with self.assertRaises(ValueError):
            self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])

        # 0 sentences, 1 offset_mapping
        with self.assertRaises(TypeError):
            self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])
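# Editor's note: a minimal usage sketch, not part of the test suite, showing the API the
# tests above exercise. The tiny checkpoint is the one from the fast tests and the example
# sentence is illustrative. `stride` and the non-NONE aggregation strategies require a
# *fast* tokenizer, since both rely on character offset mappings.
if __name__ == "__main__":
    token_classifier = pipeline(
        "token-classification",
        model="hf-internal-testing/tiny-bert-for-token-classification",
        aggregation_strategy="first",  # merge word pieces, labeling each word from its first piece
        stride=4,  # overlapping tokens between chunks when inputs exceed model_max_length
    )
    print(token_classifier("Hugging Face is based in New York City"))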
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_mask_generation.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from typing import Dict

import numpy as np
from huggingface_hub.utils import insecure_hashlib

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = insecure_hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    # TODO: Implement me @Arthur
    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
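# Editor's note: a minimal sketch, not part of the test suite, of the mask-generation API
# checked above. `facebook/sam-vit-huge` is the checkpoint the slow tests use; the pipeline
# returns parallel "masks" and "scores" lists for a single input image.
if __name__ == "__main__":
    generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
    result = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        points_per_batch=256,  # point prompts processed per forward pass; trades memory for speed
    )
    for mask, score in zip(result["masks"], result["scores"]):
        print(mask_to_test_readable(mask), float(score))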
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_depth_estimation.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from huggingface_hub.utils import insecure_hashlib

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = insecure_hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # It is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
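# Editor's note: a minimal sketch, not part of the test suite, of the depth-estimation API
# checked above, using the same Intel/dpt-large checkpoint as the slow test. The output
# carries the raw "predicted_depth" tensor plus a "depth" PIL image rescaled for viewing.
if __name__ == "__main__":
    estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["predicted_depth"].shape)  # torch.Tensor of shape (batch, height, width)
    result["depth"].save("depth.png")  # depth map normalized to 0-255 for visualization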
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_audio_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class AudioClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor) # test with a raw waveform audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return audio_classifier, [audio2, audio] def run_pipeline_test(self, audio_classifier, examples): audio2, audio = examples output = audio_classifier(audio) # by default a model is initialized with num_labels=2 self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) output = audio_classifier(audio, top_k=1) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, ], ) self.run_torchaudio(audio_classifier) @require_torchaudio def run_torchaudio(self, audio_classifier): import datasets # test with a local file dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio = dataset[0]["audio"]["array"] output = audio_classifier(audio) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): model = "anton-l/wav2vec2-random-tiny-classifier" audio_classifier = pipeline("audio-classification", model=model) audio = np.ones((8000,)) output = audio_classifier(audio, top_k=4) EXPECTED_OUTPUT = [ {"score": 0.0842, "label": "no"}, {"score": 0.0838, "label": "up"}, {"score": 0.0837, "label": "go"}, {"score": 0.0834, "label": "right"}, ] EXPECTED_OUTPUT_PT_2 = [ {"score": 0.0845, "label": "stop"}, {"score": 0.0844, "label": "on"}, {"score": 0.0841, "label": "right"}, {"score": 0.0834, "label": "left"}, ] self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate} output = audio_classifier(audio_dict, top_k=4) self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) @require_torch @slow def test_large_model_pt(self): import datasets model = "superb/wav2vec2-base-superb-ks" audio_classifier = pipeline("audio-classification", model=model) dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test") audio = np.array(dataset[3]["speech"], dtype=np.float32) output = audio_classifier(audio, top_k=4) self.assertEqual( 
nested_simplify(output, decimals=3), [ {"score": 0.981, "label": "go"}, {"score": 0.007, "label": "up"}, {"score": 0.006, "label": "_unknown_"}, {"score": 0.001, "label": "down"}, ], ) @require_tf @unittest.skip("Audio classification is not implemented for TF") def test_small_model_tf(self): pass
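# Editor's note: a minimal sketch, not part of the test suite, of the two input formats the
# tests above cover: a raw numpy waveform (assumed to already match the model's sampling
# rate) and a dict carrying the waveform together with its sampling rate.
if __name__ == "__main__":
    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    waveform = np.zeros((16000,), dtype=np.float32)  # one second of silence at 16 kHz
    print(classifier(waveform, top_k=2))
    print(classifier({"array": waveform, "sampling_rate": 16000}, top_k=2))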
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/pipelines/test_pipelines_video_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class VideoClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): example_video_filepath = hf_hub_download( repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" ) video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def run_pipeline_test(self, video_classifier, examples): for example in examples: outputs = video_classifier(example) self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" small_feature_extractor = VideoMAEFeatureExtractor( size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} ) video_classifier = pipeline( "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 ) video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset") outputs = video_classifier(video_file_path, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ) outputs = video_classifier( [ video_file_path, video_file_path, ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], ], ) @require_tf def test_small_model_tf(self): pass
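# Editor's note: a minimal sketch, not part of the test suite, of the video-classification
# pipeline exercised above, reusing the tiny checkpoint and demo clip from the fast test.
if __name__ == "__main__":
    video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    classifier = pipeline(
        "video-classification",
        model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
    )
    print(classifier(video_path, top_k=2))  # [{"score": ..., "label": ...}, ...]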
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fsdp/test_fsdp.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import unittest from functools import partial from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_accelerate, require_fsdp, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_1 else: is_torch_greater_or_equal_than_2_1 = False # default torch.distributed port DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] set_seed(42) params = list(itertools.product(sharding_strategies, dtypes)) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") require_fsdp_version = require_fsdp if is_accelerate_available(): from accelerate.utils.constants import ( FSDP_PYTORCH_VERSION, FSDP_SHARDING_STRATEGY, ) require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION) def get_launcher(distributed=False, use_accelerate=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. 
for now testing with just 2 gpus max (since some quality tests may give different
    #    results with more gpus because we use very little data)
    num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1

    master_port = get_master_port(real_launcher=True)
    if use_accelerate:
        return f"""accelerate launch
        --num_processes {num_gpus}
        --main_process_port {master_port}
        --use_fsdp
        --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP
        --fsdp_state_dict_type SHARDED_STATE_DICT
        --fsdp_transformer_layer_cls_to_wrap BertLayer""".split()
    return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split()


def _parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


@require_accelerate
@require_torch_accelerator
@require_fsdp_version
class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon):
    def setUp(self):
        super().setUp()
        master_port = get_master_port(real_launcher=False)
        self.dist_env_1_gpu = {
            "MASTER_ADDR": "localhost",
            "MASTER_PORT": master_port,
            "RANK": "0",
            "LOCAL_RANK": "0",
            "WORLD_SIZE": "1",
        }

        self.fsdp_config = {
            "backward_prefetch": "backward_pre",
            "forward_prefetch": "False",
            "limit_all_gathers": "False",
            "use_orig_params": "True",
            "sync_module_states": "True",
            "activation_checkpointing": "False",
            "min_num_params": 1,
        }

    def tearDown(self):
        super().tearDown()

    @parameterized.expand(params, name_func=_parameterized_custom_name_func)
    def test_fsdp_config(self, sharding_strategy, dtype):
        output_dir = self.get_auto_remove_tmp_dir()
        kwargs = {
            "output_dir": output_dir,
            "train_len": 128,
            "save_steps": 5,
            "learning_rate": 0.1,
            "fsdp": f"{sharding_strategy} offload auto_wrap",
            "fsdp_config": self.fsdp_config,
        }
        kwargs[dtype] = True
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            self.assertEqual(trainer.args.fsdp[0], sharding_strategy)
            self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD)
            self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP)
            for k, v in trainer.args.fsdp_config.items():
                self.assertEqual(v, self.fsdp_config[k])
            self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true")

    @parameterized.expand(params, name_func=_parameterized_custom_name_func)
    @require_torch_multi_accelerator
    @slow
    def test_basic_run(self, sharding_strategy, dtype):
        launcher = get_launcher(distributed=True, use_accelerate=False)
        output_dir = self.get_auto_remove_tmp_dir()
        args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"]
        fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"]
        script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"]
        cmd = launcher + script + args + fsdp_args
        execute_subprocess_async(cmd, env=self.get_env())

    @parameterized.expand(dtypes)
    @require_torch_multi_accelerator
    @slow
    @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.")
    def test_basic_run_with_cpu_offload(self, dtype):
        launcher = get_launcher(distributed=True, use_accelerate=False)
        output_dir = self.get_auto_remove_tmp_dir()
        args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"]
        fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"]
        script =
[f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) sharding_strategy = "full_shard" use_accelerate = state_dict_type == "SHARDED_STATE_DICT" launcher = get_launcher(True, use_accelerate=use_accelerate) args = self.get_base_args(output_dir, 2, 25).split() script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir) # resume from ckpt checkpoint = os.path.join(output_dir, "checkpoint-115") resume_args = args + f"--resume_from_checkpoint {checkpoint}".split() logs_resume = self.run_cmd_and_get_logs( use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir ) for log, log1 in zip(logs, logs_resume): if "learning_rate" in log: self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], delta=1e-5) def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir): if not use_accelerate: fsdp_args = [ "--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer", ] cmd = launcher + script + args + fsdp_args else: fsdp_config = f""" --fsdp_sharding_strategy {FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1} """.split() cmd = launcher + fsdp_config + script + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history return logs def get_base_args(self, output_dir, num_epochs, logging_steps): return f""" --model_name_or_path bert-base-cased --task_name mrpc --output_dir {output_dir} --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs {num_epochs} --lr_scheduler_type cosine --logging_steps {logging_steps} --save_strategy epoch --do_eval --evaluation_strategy epoch --report_to none """
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/extended/test_trainer_ext.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
import re
import sys
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_accelerator,
    require_torch_non_multi_accelerator,
    slow,
    torch_device,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_accelerator
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_accelerator
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_accelerator
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
# self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") @parameterized.expand(["base", "low", "high", "mixed"]) @require_torch_multi_accelerator def test_trainer_log_level_replica(self, experiment_id): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout experiments = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } data = experiments[experiment_id] kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} log_info_string = "Running training" with CaptureStderr() as cl: self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"]) n_matches = len(re.findall(log_info_string, cl.err)) self.assertEqual(n_matches, data["n_matches"]) @slow def test_run_seq2seq(self): output_dir = self.run_trainer( eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, ) # Check metrics logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history eval_metrics = [log for log in logs if "eval_loss" in log.keys()] first_step_stats = eval_metrics[0] last_step_stats = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"], float) # test if do_predict saves generations and metrics contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def test_run_seq2seq_bnb(self): from transformers.training_args import OptimizerNames def train_and_return_metrics(optim: str) -> Tuple[int, float]: extra_args = "--skip_memory_metrics 0" output_dir = self.run_trainer( max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, # force run in a new process extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, # to allow deterministic fixed memory usage ) # Check metrics logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20) gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20) loss = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value) gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value) gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb gpu_total_mem_orig = 
gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb

        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as follows:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args +=
"--adafactor".split() else: args += f"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: n_gpus_to_use = backend_device_count(torch_device) master_port = get_torch_dist_unique_port() distributed_args = f""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() cmd = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) else: testargs = ["run_translation.py"] + args with patch.object(sys, "argv", testargs): main() return output_dir
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_framework_agnostic.py
""" Framework agnostic tests for generate()-related methods. """ import numpy as np from transformers import AutoTokenizer from transformers.testing_utils import slow, torch_device class GenerationIntegrationTestsMixin: # To be populated by the child classes framework_dependent_parameters = { "AutoModelForCausalLM": None, "AutoModelForSpeechSeq2Seq": None, "AutoModelForSeq2SeqLM": None, "AutoModelForVision2Seq": None, "LogitsProcessorList": None, "MinLengthLogitsProcessor": None, "create_tensor_fn": None, "floats_tensor": None, "return_tensors": None, "set_seed": None, } def test_validate_generation_inputs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors=return_tensors).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs) # however, valid model_kwargs are accepted valid_model_kwargs = {"attention_mask": create_tensor_fn(np.zeros_like(input_ids))} model.generate(input_ids, **valid_model_kwargs) def test_custom_logits_processor(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] logits_processor_list_cls = self.framework_dependent_parameters["LogitsProcessorList"] min_length_logits_processor_cls = self.framework_dependent_parameters["MinLengthLogitsProcessor"] return_tensors = self.framework_dependent_parameters["return_tensors"] bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1) input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids logits_processor = logits_processor_list_cls() logits_processor.append(min_length_logits_processor_cls(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart") input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: bart_model = bart_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 
bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 29 (input length) + 3 new tokens self.assertEqual(list(outputs.shape), [1, 33]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_max_new_tokens_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake.""" gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: gpt2_model = gpt2_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_encoder_decoder_generate_with_inputs_embeds(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_transition_scores_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648], [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def 
test_transition_scores_greedy_search_normalized(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835], [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() 
outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake", "Michael Phelps", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-gpt2", max_length=10, num_beams=4, num_return_sequences=2, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_sample_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", do_sample=True, max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) @slow def test_transition_scores_early_stopping(self): # This is an aggressive test that makes sure that `beam_search's` # transition scores are computed correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1` # 2 x input_ids for "question: How are you? 
\n context: I had a long day, " model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_ids = create_tensor_fn(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]]) model = model_cls.from_pretrained("t5-small") if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids, max_length=10, return_dict_in_generate=True, output_scores=True, forced_eos_token_id=model.config.eos_token_id, num_beams=4, do_sample=False, num_return_sequences=3, length_penalty=0.0, ) transition_scores = model.compute_transition_scores( sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices ) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores)) def test_encoder_decoder_generate_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") # need extreme generation values here to force this test # to fail when `attention_mask` is not correctly treated in generate model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5 ) model.config.eos_token_id = None input_ids = tokenizer(articles[0], return_tensors=return_tensors).input_ids input_ids_batched = tokenizer(articles, padding=True, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) input_ids_batched = input_ids_batched.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True) batched_out = output_sequences_batched.sequences_scores out = output_sequences.sequences_scores if is_pt: batched_out = batched_out.cpu().numpy() out = out.cpu().numpy() diff = np.abs(np.sum(batched_out[:5]) - np.sum(out)) self.assertTrue(diff < 1e-4) def test_generate_input_ids_as_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 15)) def test_generate_input_ids_as_encoder_kwarg(self): model_cls = 
self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) output_sequences_kwargs = model.generate(input_ids=input_ids) output_sequences = model.generate(input_ids) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (1, 5)) def test_generate_inputs_and_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids, input_ids=input_ids) def test_generate_too_many_encoder_kwargs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """I need input_ids to generate""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10) input_ids = tokenizer(article, return_tensors=return_tensors).input_ids with self.assertRaises(ValueError): model.generate(input_ids=input_ids, inputs_embeds=input_ids) def test_generate_input_features_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = model.generate(input_features=input_features, max_length=5) output_sequences = model.generate(input_features, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (3, 5)) def test_generate_pixel_values_as_encoder_kwarg(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") model.config.decoder.eos_token_id = None if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) output_sequences_kwargs = 
model.generate(pixel_values=pixel_values, max_length=5) output_sequences = model.generate(pixel_values, max_length=5) if is_pt: output_sequences_kwargs = output_sequences_kwargs.cpu().numpy() output_sequences = output_sequences.cpu().numpy() self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs)) self.assertEqual(output_sequences.shape, (2, 5)) def test_generate_encoder_outputs_attention_mask(self): model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") input_features = floats_tensor((3, 80, 60)) attention_mask = create_tensor_fn(np.ones(input_features.shape)) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration") if is_pt: input_features = input_features.to(torch_device) attention_mask = attention_mask.to(torch_device) model = model.to(torch_device) encoder = model.get_encoder() encoder_outputs = encoder(input_features) output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs) output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask) if is_pt: output_sequences_no_mask = output_sequences_no_mask.cpu().numpy() output_sequences_with_mask = output_sequences_with_mask.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_no_mask, output_sequences_with_mask)) def test_eos_token_id_int_and_list_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, } expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_eos_token_id_int_and_list_contrastive_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 1, "penalty_alpha": 0.6, "top_k": 4, } expectation = 17 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 225 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [225, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def 
test_eos_token_id_int_and_list_beam_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") generation_kwargs = { "do_sample": False, "num_beams": 3, } if is_pt: expectation = 20 else: # TODO (joao): fix me expectation = 13 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors=return_tensors) model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") if is_pt: model = model.to(torch_device) tokens = tokens.to(torch_device) eos_token_id = 873 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) eos_token_id = [873, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) unpadded_correct_condition = expectation == len(generated_tokens[0]) padded_correct_condition = expectation < len(generated_tokens[0]) and all( token == model.config.pad_token_id for token in generated_tokens[0][expectation:] ) self.assertTrue(unpadded_correct_condition or padded_correct_condition) def test_generate_vision2text_conditioning(self): model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"] floats_tensor = self.framework_dependent_parameters["floats_tensor"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] is_pt = not model_cls.__name__.startswith("TF") pixel_values = floats_tensor((2, 3, 30, 30)) conditioning_input = create_tensor_fn([[10], [10]]) # this should be the 2nd output token, after the BOS token model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2") if is_pt: pixel_values = pixel_values.to(torch_device) model = model.to(torch_device) conditioning_input = conditioning_input.to(torch_device) # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as # decoder_input_ids, if the encoder is not a model with text input) output_sequences_decoder_input_ids = model.generate( pixel_values, max_length=5, decoder_input_ids=conditioning_input ) output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input) if is_pt: output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy() output_sequences_input_ids = output_sequences_input_ids.cpu().numpy() conditioning_input = conditioning_input.cpu().numpy() self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids)) self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], conditioning_input))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_logits_process.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from typing import List, Union

from parameterized import parameterized

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch
    from torch import nn

    from transformers.generation import (
        EncoderNoRepeatNGramLogitsProcessor,
        EncoderRepetitionPenaltyLogitsProcessor,
        EpsilonLogitsWarper,
        EtaLogitsWarper,
        ExponentialDecayLengthPenalty,
        ForcedBOSTokenLogitsProcessor,
        ForcedEOSTokenLogitsProcessor,
        HammingDiversityLogitsProcessor,
        InfNanRemoveLogitsProcessor,
        LogitNormalization,
        LogitsProcessorList,
        MinLengthLogitsProcessor,
        MinNewTokensLengthLogitsProcessor,
        NoBadWordsLogitsProcessor,
        NoRepeatNGramLogitsProcessor,
        PrefixConstrainedLogitsProcessor,
        RepetitionPenaltyLogitsProcessor,
        SequenceBiasLogitsProcessor,
        TemperatureLogitsWarper,
        TopKLogitsWarper,
        TopPLogitsWarper,
        TypicalLogitsWarper,
        UnbatchedClassifierFreeGuidanceLogitsProcessor,
    )
    from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor


@require_torch
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return scores

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 5), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        input_ids = ids_tensor((batch_size, 15), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores_before_min_length).any())

    @parameterized.expand([(0,), ([0, 18],)])
    def test_new_min_length_dist_processor(self, eos_token_id: Union[int, List[int]]):
        vocab_size = 20
        batch_size = 4

        # check that first input is skipped (min new length applying)
        input_ids = ids_tensor((batch_size, 5), vocab_size=20)
        new_min_dist_processor = MinNewTokensLengthLogitsProcessor(
            prompt_length_to_skip=input_ids.shape[-1], min_new_tokens=3, eos_token_id=eos_token_id
        )

        expected_eos_scores_before_min_length = batch_size * [-float("inf")]
        if isinstance(eos_token_id, list):
            expected_eos_scores_before_min_length *= len(eos_token_id)

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertListEqual(
            scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length
        )

        # check that the prompt length to skip is 5, i.e. the first 5 tokens are skipped
        self.assertTrue(new_min_dist_processor.prompt_length_to_skip == 5)

        # check that min length is applied at length 2
        input_ids = ids_tensor((batch_size, 2), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertListEqual(
            scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length
        )

        # check that min new length is applied at length 6 (because it has only 1 new token)
        input_ids = ids_tensor((batch_size, 6), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertListEqual(
            scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length
        )

        # check that min new length is applied at length 7 (because it has only 2 new tokens)
        input_ids = ids_tensor((batch_size, 7), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertListEqual(
            scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length
        )

        # check that min new length is not applied anymore at length 8
        input_ids = ids_tensor((batch_size, 8), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores_before_min_length).any())

        # check that min new length is not applied anymore at length 15
        input_ids = ids_tensor((batch_size, 15), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = new_min_dist_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores_before_min_length).any())

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores[1, 5] = (1 / length) + 0.1  # peak, second batch item
        scores[1, 10] = (1 / length) - 0.4  # valley, second batch item

        # compute softmax
        probs = nn.functional.softmax(scores, dim=-1)

        temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1)
        warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1)

        # uniform distribution stays uniform
        self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_repetition_penalty_dist_process(self):
        input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long)
        vocab_size = 10

        scores = self._get_uniform_logits(batch_size=2, length=vocab_size)

        # give some tokens special scores
        scores[0, 0] = -(1 / vocab_size)
        scores[1, 5] = 4 / vocab_size

        rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)

        scores = rep_penalty_proc(input_ids, scores.clone())

        # check that values were correctly changed
        self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2)
        self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2)

        self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2)
        self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2)

    def test_encoder_repetition_penalty_dist_process(self):
        input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long)
        vocab_size = 10

        scores = self._get_uniform_logits(batch_size=2, length=vocab_size)

        # give some tokens special scores
        scores[0, 0] = -(1 / vocab_size)
        scores[1, 5] = 4 / vocab_size

        rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor(penalty=2.0, encoder_input_ids=input_ids)

        scores = rep_penalty_proc(input_ids, scores.clone())

        # check that values were correctly changed
        self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) / 2)
        self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) * 2)

        self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) * 2)
        self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) * 2)

        # check that values not in the encoder ids were NOT changed
        self.assertAlmostEqual(scores[0, 2].item(), (1 / vocab_size))
        self.assertAlmostEqual(scores[1, 2].item(), (1 / vocab_size))

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = (
            torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1)
        )
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = TopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits)

        # check that correct tokens are filtered
        self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special cases
        length = 5

        logits = self._get_uniform_logits(batch_size=batch_size, length=length)
        top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        scores = top_k_warp_safety_check(input_ids, logits)
        # uniform dist is not changed
        self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0])

        ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1)
        scores = top_k_warp_safety_check(input_ids, ramp_logits)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = torch.log(
            torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float)
        )

        top_p_warp = TopPLogitsWarper(0.8)
        filtered_dist = torch.exp(top_p_warp(input_ids, dist))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = torch.tensor(
            [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
            batch_size, 1
        ) - (vocab_size // 2)

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2])

    def test_typical_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = torch.log(
            torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float)
        )

        typical_warp = TypicalLogitsWarper(0.5)
        filtered_dist = torch.exp(typical_warp(input_ids, dist))

        # dist should be filtered to keep min num values so that sum is >= 0.7
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = torch.tensor(
            [[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check special cases
        length = 5

        logits = self._get_uniform_logits(batch_size=batch_size, length=length)
        typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3)

        scores = typical_warp_safety_check(input_ids, logits)
        # uniform dist is not changed
        self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0])

        # check edge cases with negative and extreme logits
        ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
            batch_size, 1
        ) - (vocab_size // 2)

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = typical_warp(input_ids, ramp_logits)

        # first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])

    def test_epsilon_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = torch.log(
            torch.tensor(
                [[0.87, 0.099, 0.001, 0.03], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float
            )
        )

        epsilon_warp = EpsilonLogitsWarper(0.1)
        filtered_dist = torch.exp(epsilon_warp(input_ids, dist))

        # dist should be filtered to only keep values with proba >= 0.1
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = torch.tensor(
            [[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
            batch_size, 1
        ) - (vocab_size // 2)

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        epsilon_warp = EpsilonLogitsWarper(5e-2, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = epsilon_warp(input_ids, ramp_logits)

        # first batch should keep 3 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2])

    def test_eta_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = torch.log(
            torch.tensor([[0.0, 0.1, 0.8, 0.1], [0.01, 0.04, 0.9, 0.05]], device=torch_device, dtype=torch.float)
        )

        eta_warp = EtaLogitsWarper(0.0625)
        filtered_dist = torch.exp(eta_warp(input_ids, dist))

        # dist should be filtered to only keep values with proba >= min(0.0625, sqrt(0.0625) * e^-H(p))
        # min(0.0625, 0.1320) is the cutoff for the first row and min(0.0625, 0.1644) is for the second
        # where H is the entropy function and p is the probability vector.
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = torch.tensor(
            [[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
            batch_size, 1
        ) - (vocab_size // 2)

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        eta_warp = EtaLogitsWarper(0.1, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = eta_warp(input_ids, ramp_logits)

        # first batch should keep 2 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])

    def test_no_repeat_ngram_dist_processor(self):
        vocab_size = 3
        batch_size = 2

        input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size, vocab_size)

        no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2)
        no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3)

        filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
        filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())

        # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch
        self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]])

        # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch
        self.assertListEqual(
            torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]]
        )

    def test_encoder_no_repeat_ngram_dist_processor(self):
        vocab_size = 3
        num_beams = 2
        batch_size = 1

        encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long)

        input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size * num_beams, vocab_size)

        no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids)
        no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids)

        filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
        filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())

        # 2-gram would forbid 1st and 2nd token at 1st beam and 1st token (0) at 2nd beam
        self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]])

        # 3-gram would forbid 1st token at 1st beam and no token at 2nd beam
        self.assertListEqual(
            torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]]
        )

        # Batched input
        vocab_size = 3
        num_beams = 2
        batch_size = 2
        encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long)

        input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size * num_beams, vocab_size)

        no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids)
        no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids)

        filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
        filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())

        # 2-gram
        # Batch 1
        #   - Beam 1: tokens (1, 2) forbidden
        #   - Beam 2: tokens (1) forbidden
        # Batch 2
        #   - Beam 1: tokens (0, 2) forbidden
        #   - Beam 2: tokens (1) forbidden
        self.assertListEqual(
            torch.isinf(filtered_scores_2_gram).tolist(),
            [[False, True, True], [False, True, False], [True, False, True], [False, True, False]],
        )

        # 3-gram
        # Batch 1
        #   - Beam 1: tokens (1) forbidden
        #   - Beam 2: tokens () forbidden
        # Batch 2
        #   - Beam 1: tokens (2) forbidden
        #   - Beam 2: tokens () forbidden
        self.assertListEqual(
            torch.isinf(filtered_scores_3_gram).tolist(),
            [[False, True, False], [False, False, False], [False, False, True], [False, False, False]],
        )

    def test_no_bad_words_dist_processor(self):
        vocab_size = 5
        batch_size = 2
        eos_token_id = 4

        input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]]
        scores = self._get_uniform_logits(batch_size, vocab_size)

        no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id)

        filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone())

        # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden
        # batch 2: 1st, 2nd, and 3rd (0, 1, 2) token are forbidden
        # Note that the 5th element cannot be forbidden as it is the EOS token
        self.assertListEqual(
            torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]]
        )

        # check edge case
        no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id)
        filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone())
        self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3))

    def test_bias_dist_processor(self):
        vocab_size = 5
        batch_size = 2

        input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        positive_bias = {(1,): 100.0, (4,): 100.0}
        negative_bias = {(1, 0): -100.0, (0, 1, 2): -100.0, (1, 3, 1, 3): -100.0}
        # biases the same termination twice, to ensure we can handle overlapping terminations (it won't have an effect
        # on the test cases, though)
        negative_bias.update({(1, 3, 1, 3, 1, 3): -100.0})
        sequence_bias = {**positive_bias, **negative_bias}

        # scores = 0 to facilitate checks
        scores = torch.zeros((batch_size, vocab_size), dtype=torch.float, device=torch_device)

        bias_dist_proc = SequenceBiasLogitsProcessor(sequence_bias=sequence_bias)
        filtered_scores = bias_dist_proc(input_ids, scores.clone())

        # batch 1: positive bias: tokens (1, 4); negative bias: tokens (0, 3); neutral: tokens (2)
        # batch 2: positive bias: tokens (1, 4); negative bias: tokens (0, 2); neutral: tokens (3)
        self.assertListEqual(
            filtered_scores.tolist(), [[-100.0, 100.0, 0.0, -100.0, 100.0], [-100.0, 100.0, -100.0, 0.0, 100.0]]
        )

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 0

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.clone()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.clone()

        # instantiate all dist processors
        min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        temp_dist_warp = TemperatureLogitsWarper(temperature=0.5)
        rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
        top_k_warp = TopKLogitsWarper(3)
        top_p_warp = TopPLogitsWarper(0.8)
        no_repeat_proc = NoRepeatNGramLogitsProcessor(2)
        no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id)

        # no processor list
        scores = min_dist_proc(input_ids, scores)
        scores = temp_dist_warp(input_ids, scores)
        scores = rep_penalty_proc(input_ids, scores)
        scores = top_k_warp(input_ids, scores)
        scores = top_p_warp(input_ids, scores)
        scores = no_repeat_proc(input_ids, scores)
        scores = no_bad_words_dist_proc(input_ids, scores)

        # with processor list
        processor = LogitsProcessorList(
            [
                min_dist_proc,
                temp_dist_warp,
                rep_penalty_proc,
                top_k_warp,
                top_p_warp,
                no_repeat_proc,
                no_bad_words_dist_proc,
            ]
        )
        scores_comp = processor(input_ids, scores_comp)

        # scores should be equal
        self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_prefix_constrained_logits_processor(self):
        vocab_size = 5
        batch_size = 2

        input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size, vocab_size)

        def prefix_allowed_tokens_fn(batch_id, inputs_ids):
            return [[0, 1], [2, 3]][batch_id]

        prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1)

        filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone())

        # batch 1: 1st, 2nd (0, 1) token are allowed
        # batch 2: 3rd, 4th (2, 3) token are allowed
        self.assertListEqual(
            torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]]
        )

    def test_hamming_diversity(self):
        vocab_size = 4
        num_beams = 2
        num_beam_groups = 2

        scores = self._get_uniform_logits(num_beams, vocab_size)
        # batch_idx = 0 -> index batch_idx * num_beam_groups -> idx = 0 * 2 = 0 -> penalises token 0
        # batch_idx = 1 -> index batch_idx * num_beam_groups -> idx = 1 * 2 = 2 -> penalises token 1
        current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long)

        diversity_logits_processor = HammingDiversityLogitsProcessor(
            diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups
        )

        processed_scores = diversity_logits_processor(None, scores, current_tokens, 1)

        self.assertTrue(
            torch.allclose(
                processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )
        self.assertTrue(
            torch.allclose(
                processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length-1 is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length-1 is not reached
        input_ids = ids_tensor((batch_size, 3), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_remove_nan_inf_logits_processor(self):
        scores = torch.tensor(
            [[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device
        )
        input_ids = ids_tensor((2, 4), vocab_size=20)

        logits_processor = InfNanRemoveLogitsProcessor()

        scores = logits_processor(input_ids, scores)

        self.assertTrue(
            torch.allclose(
                scores,
                torch.tensor(
                    [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]],
                    device=torch_device,
                ),
                atol=1e-6,
            )
        )

    def test_exponential_decay_length_penalty(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        penalty_start = 5
        penalty_factor = 1.1

        input_ids = ids_tensor((batch_size, 2), vocab_size=vocab_size)
        input_ids_seq_length = input_ids.shape[-1]

        length_decay_processor = ExponentialDecayLengthPenalty(
            exponential_decay_length_penalty=(penalty_start, penalty_factor),
            eos_token_id=eos_token_id,
            input_ids_seq_length=input_ids_seq_length,
        )

        # check that the penalty is not applied before start
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_start = torch.clone(scores)  # clone scores as the processor updates them in place
        scores_before_start = length_decay_processor(input_ids, scores_before_start)
        self.assertListEqual(scores_before_start[:, eos_token_id].tolist(), scores[:, eos_token_id].tolist())

        # check that the penalty is applied after start
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_after_start = torch.clone(scores)  # clone scores as the processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
        self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all())

        # check that the penalty increases negative scores
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = torch.neg(self._get_uniform_logits(batch_size, vocab_size))
        scores_after_start = torch.clone(scores)  # clone scores as the processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
        self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all())

    def test_normalization(self):
        input_ids = None

        scores = torch.tensor(
            [[-23.18, -29.96, -43.54, 47.77], [-33.58, -26.87, -32.96, 22.51]], device=torch_device, dtype=torch.float
        )

        logit_normalization = LogitNormalization()
        normalized_scores = logit_normalization(input_ids, scores).exp()

        ones = torch.ones(scores.shape[0], device=torch_device, dtype=torch.float)
        self.assertTrue(normalized_scores.sum(dim=-1).allclose(ones))

        self.assertTrue(normalized_scores.allclose(scores.softmax(dim=-1)))

    def test_classifier_free_guidance(self):
        class Namespace(dict):
            pass

        logits_uncond = torch.tensor([[[1.0, 0, 1.5]]])
        logits_cond = torch.tensor([[[1.0, 1.0, 1.0]]])

        def dummy_model(input_ids, attention_mask, use_cache=True, past_key_values=None):
            out = Namespace()
            out.logits = logits_uncond
            out.past_key_values = None
            return out

        def lsm(x):
            return torch.nn.functional.log_softmax(x, dim=-1)

        # explicit unconditional prompt + attention mask
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(
            1.5, dummy_model, input_ids, torch.ones_like(input_ids, dtype=torch.long)
        )
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

        # explicit unconditional prompt
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model, input_ids)
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

        # all implicit
        input_ids = torch.LongTensor([[0]])
        cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model)
        out = cfg(input_ids, logits_cond)[0, -1]

        res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1]

        self.assertAlmostEqual(out[0].item(), res[0].item())
        self.assertAlmostEqual(out[1].item(), res[1].item())
        self.assertAlmostEqual(out[2].item(), res[2].item())

    def test_early_stop_processor(self):
        input_ids = None
        eos_token_id = 2
        min_eos_p = 0.1  # some small float

        scores = self._get_uniform_logits(2, 4)
        scores[0][eos_token_id] = -6  # less than log(min_eos_p)

        esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p)
        actual_scores = esp(input_ids, scores)
        expected_scores_list = [
            scores[0].tolist(),
            [float("-inf"), float("-inf"), scores[0][0], float("-inf")],
        ]
        self.assertListEqual(actual_scores.tolist(), expected_scores_list)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_configuration_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import warnings from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class GenerationConfigTest(unittest.TestCase): @parameterized.expand([(None,), ("foo.json",)]) def test_save_load_config(self, config_name): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, config_name=config_name) loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.temperature, 0.7) self.assertEqual(loaded_config.length_penalty, 1.0) self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k, 50) self.assertEqual(loaded_config.max_length, 20) self.assertEqual(loaded_config.max_time, None) def test_from_model_config(self): model_config = AutoConfig.from_pretrained("gpt2") generation_config_from_model = GenerationConfig.from_model_config(model_config) default_generation_config = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(generation_config_from_model, default_generation_config) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id) def test_update(self): generation_config = GenerationConfig() update_kwargs = { "max_new_tokens": 1024, "foo": "bar", } update_kwargs_copy = copy.deepcopy(update_kwargs) unused_kwargs = generation_config.update(**update_kwargs) # update_kwargs was not modified (no side effects) self.assertEqual(update_kwargs, update_kwargs_copy) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens, 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(unused_kwargs, {"foo": "bar"}) def test_initialize_new_kwargs(self): generation_config = GenerationConfig() generation_config.foo = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo, "bar") generation_config = GenerationConfig.from_model_config(new_config) assert not hasattr(generation_config, "foo") # no new kwargs should be initialized if from config def 
test_kwarg_init(self): """Tests that we can overwrite attributes at `from_pretrained` time.""" default_config = GenerationConfig() self.assertEqual(default_config.temperature, 1.0) self.assertEqual(default_config.do_sample, False) self.assertEqual(default_config.num_beams, 1) config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) self.assertEqual(config.temperature, 0.7) self.assertEqual(config.do_sample, True) self.assertEqual(config.num_beams, 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0) self.assertEqual(loaded_config.temperature, 1.0) self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.num_beams, 1) # default value def test_refuse_to_save(self): """Tests that we refuse to save a generation config that fails validation.""" # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning that # is caught, doesn't save, and raises a warning config = GenerationConfig() config.temperature = 0.5 with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 1) self.assertTrue("Fix these issues to save the configuration." in str(captured_warnings[0].message)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # greedy decoding throws an exception if we try to return multiple sequences -> throws an exception that is # caught, doesn't save, and raises a warning config = GenerationConfig() config.num_return_sequences = 2 with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 1) self.assertTrue("Fix these issues to save the configuration." 
in str(captured_warnings[0].message)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # final check: no warnings thrown if it is correct, and file is saved config = GenerationConfig() with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 0) self.assertTrue(len(os.listdir(tmp_dir)) == 1) @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org") except HTTPError: pass def test_push_to_hub(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("test-generation-config", token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-generation-config", push_to_hub=True, token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("valid_org/test-generation-config-org", token=self._token) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, token=self._token ) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k))
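# --- Illustrative sketch (not part of the original test file) ---
# A minimal, hedged example of the behaviour exercised above: a GenerationConfig can be
# saved, reloaded (optionally under a custom file name), and updated with extra kwargs,
# where unknown keys are returned rather than silently applied. Values are arbitrary and
# no Hub access is needed.
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, bad_words_ids=[[1, 2, 3]])

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir, config_name="my_generation_config.json")
    reloaded = GenerationConfig.from_pretrained(tmp_dir, config_name="my_generation_config.json")
    assert reloaded.temperature == 0.7  # explicitly set value survives the round trip
    assert reloaded.top_k == 50         # unspecified values fall back to the defaults

# `update` applies known attributes in place and returns the unused kwargs.
unused = config.update(max_new_tokens=64, not_a_real_option="ignored")
assert config.max_new_tokens == 64
assert unused == {"not_a_real_option": "ignored"}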
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_streamers.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class StreamerTester(unittest.TestCase): def test_text_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) # The greedy text should be printed to stdout, except for the final "\n" in the streamer streamer_text = cs.out[:-1] self.assertEqual(streamer_text, greedy_text) def test_iterator_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) streamer = TextIteratorStreamer(tokenizer) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() streamer_text = "" for new_text in streamer: streamer_text += new_text self.assertEqual(streamer_text, greedy_text) def test_text_streamer_skip_prompt(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) new_greedy_ids = greedy_ids[:, input_ids.shape[1] :] new_greedy_text = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_prompt=True) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) # The greedy text should be printed to stdout, except for the final "\n" in the streamer streamer_text = cs.out[:-1] self.assertEqual(streamer_text, new_greedy_text) def test_text_streamer_decode_kwargs(self): # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_special_tokens=True) model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token streamer_text = cs.out[:-1] # Remove the final "\n" streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1)) def test_iterator_streamer_timeout(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) streamer = TextIteratorStreamer(tokenizer, timeout=0.001) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(Empty): streamer_text = "" for new_text in streamer: streamer_text += new_text
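# --- Illustrative sketch (not part of the original test file) ---
# A hedged usage example of the two streamers covered above: TextStreamer prints tokens to
# stdout as they are generated, while TextIteratorStreamer exposes them through a blocking
# iterator fed from a background generation thread. The tiny checkpoint is the same one the
# tests use; swap in any causal LM.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("An example prompt", return_tensors="pt")

# 1) Print-as-you-go streaming.
model.generate(**inputs, max_new_tokens=10, streamer=TextStreamer(tokenizer, skip_prompt=True))

# 2) Iterator streaming: run generate() in a thread and consume the pieces here.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=30.0)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
generated_text = "".join(piece for piece in streamer)
thread.join()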
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest import warnings import numpy as np from transformers import is_torch_available, pipeline from transformers.testing_utils import ( is_flaky, require_accelerate, require_torch, require_torch_multi_accelerator, slow, torch_device, ) from ..test_modeling_common import floats_tensor, ids_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, AutoTokenizer, BartForCausalLM, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, SpeechEncoderDecoderModel, top_k_top_p_filtering, ) from transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, RepetitionPenaltyLogitsProcessor, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) class GenerationTesterMixin: model_tester = None all_generative_model_classes = () input_name = "input_ids" def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], "repetition_penalty": 1.2, "remove_invalid_values": True, } # NoRepeatNGramLogitsProcessor + forced tokens may result in no valid continuations if forced_bos_token_id is None and forced_eos_token_id is None: 
process_kwargs["no_repeat_ngram_size"] = 2 # NOTE: the order of operations here should match `generate` for accurate testing logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( [ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id)] + ( [NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"])] if forced_bos_token_id is None and forced_eos_token_id is None else [] ) + [RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"])] + [InfNanRemoveLogitsProcessor()] # prevent flaky generation test failures ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) 
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) 
return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, 
output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, 
model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, 
attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, 
SampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=2) def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) # check `generate()` and `beam_search()` are equal output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) def test_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: 
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams ) @require_accelerate @require_torch_multi_accelerator def test_model_parallel_beam_search(self): for model_class in self.all_generative_model_classes: if model_class._no_split_modules is None: continue config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) new_model = model_class.from_pretrained(tmp_dir, device_map="auto") new_model.generate( input_ids, attention_mask=attention_mask, max_length=max_length, num_beams=2, ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = 
model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_sample = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None 
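# --- Illustrative sketch (not part of the original test mixin) ---
# The group (diverse) beam search being set up here is reached through the public API by
# combining `num_beam_groups` > 1 with a `diversity_penalty`; `num_return_sequences` hypotheses
# are returned per input. Values and the tiny checkpoint are only for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").eval()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
input_ids = tokenizer("A short prompt", return_tensors="pt").input_ids

sequences = model.generate(
    input_ids,
    do_sample=False,
    num_beams=4,
    num_beam_groups=2,
    diversity_penalty=1.0,
    num_return_sequences=2,
    max_new_tokens=5,
)
print(sequences.shape)  # (num_return_sequences, up to prompt length + 5)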
config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 
) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints min_id = 3 max_id = config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = 
model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints min_id = 3 max_id = model.config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. 
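# --- Illustrative sketch (not part of the original test mixin) ---
# Contrastive search, as exercised by the tests above, is reached through the public API
# simply by passing `penalty_alpha` together with `top_k` (and, per the note here, it needs
# the model cache enabled, which is the default for most models).
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").eval()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
input_ids = tokenizer("A short prompt", return_tensors="pt").input_ids

output = model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_new_tokens=5)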
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_contrastive_generate_low_memory(self): # Check that choosing 'low_memory' does not change the model output for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer", "speech2text"]): self.skipTest("Won't fix: old model with different cache format") if any(model_name in model_class.__name__.lower() for model_name in ["gptbigcode"]): self.skipTest("TODO: fix me") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1) # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True # test output equality of low versus high memory model = model_class(config).to(torch_device).eval() low_output = model.generate( input_ids, top_k=4, penalty_alpha=0.6, low_memory=True, max_length=max_length, attention_mask=attention_mask, ) high_output = model.generate( input_ids, top_k=4, penalty_alpha=0.6, low_memory=False, max_length=max_length, attention_mask=attention_mask, ) self.assertListEqual(low_output.tolist(), high_output.tolist()) @is_flaky() # Read NOTE (1) below. If there are API issues, all attempts will fail. def test_assisted_decoding_matches_greedy_search(self): # This test ensures that the assisted generation does not introduce output changes over greedy search. # NOTE (1): The sentence above is true most of the time, there is a tiny difference in the logits due to matmul # shape differences -- and it may result in a different output. The input shape difference happens in the # main model, that runs the forward pass with several candidates at once (as opposed to generating one token at # a time). See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for more info. # NOTE (2): It breaks the pattern in the tests above, for multiple reasons: # - assisted_decoding, contrarily to the other methods, can't be called on its own (e.g. needs to # prepare the assistant encoder outputs in the main generate body); # - assisted_decoding does not support `use_cache = False` # - assisted_decoding does not support `batch_size > 1` for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": False, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_greedy = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs.update({"assistant_model": assistant_model}) output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) # The two outputs must match and their shape must be as expected self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) for output in (output_greedy, output_assisted): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_assisted_decoding_sample(self): # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": True, "assistant_model": assistant_model, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) self._check_outputs(output_assisted, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue model = model_class(config).to(torch_device) head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def test_left_padding_compatibility(self): # The check done in this test is fairly difficult -- depending on the model architecture, passing the right # position index for the position embeddings can still result in a different output, due to numerical masking. # On the other hand, for some types of position embeddings, an incorrect position index can have a minimal # impact on the output. # There are two tricks employed to check whether left-padding compatibility is in place: # 1 - To reduce the negative impact of the numerical attention mask on a correct position index, we set the # padding size to 1. # 2 - To reduce the chance of false positives (i.e. passing when it should be failing), we run the check # multiple times with random inputs, and it has to pass with all of them. 
# NOTE: because of 2), there is some chance of false positives in this test. for model_class in self.all_generative_model_classes: config, _, _, _ = self._get_input_ids_and_config() if config.is_encoder_decoder: continue # skip for encoder-decoder models -- they don't need left-padding compatibility model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() no_failures = True for _ in range(10): # there may be false positives with 10 runs, we rely on the CI to catch the flakiness _, input_ids, attention_mask, _ = self._get_input_ids_and_config() model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :] pad_size = (input_ids.shape[0], 1) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = {"input_ids": padded_input_ids, "attention_mask": padded_attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(padded_attention_mask, dim=-1) - 1 position_ids.masked_fill_(padded_attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_with_padding = model(**model_kwargs).logits[:, -1, :] if not torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-7): no_failures = False break self.assertTrue(no_failures) def test_past_key_values_format(self): # Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test. Having a # standard KV cache format is important for a consistent API (and for advanced generation methods). for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: self.skipTest("This model doesn't return `past_key_values`") num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "decoder_attention_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) # Encoder-Decoder checks if config.is_encoder_decoder: encoder_num_attention_heads = config.encoder_attention_heads encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads batch_size, seq_length = inputs["decoder_input_ids"].shape for i in range(num_hidden_layers): self.assertEqual(len(past_kv[i]), 4) # K V for the decoder + K V for the encoder = 4 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) # The sequence length for the encoder K V depends on the model. Since it is not manipulated in # autoregressive generation, I'm keeping the test general and not checking the 3rd dim self.assertEqual( (past_kv[i][2].shape[0], past_kv[i][2].shape[1], past_kv[i][2].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) self.assertEqual( (past_kv[i][3].shape[0], past_kv[i][3].shape[1], past_kv[i][3].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) # Decoder-only checks else: # TODO: this line is only needed because of imagegpt, where "pixel_values" = "input_ids". 
Fix the # tests in imagegpt such that `prepare_config_and_inputs_for_common` returns the latter (and the other # tests use it) key = "input_ids" if "input_ids" in inputs else "pixel_values" batch_size, seq_length = inputs[key].shape for i in range(num_hidden_layers): self.assertEqual(len(past_kv[i]), 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) def test_generate_from_inputs_embeds_decoder_only(self): # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids` # if it fails, you should probably update the `prepare_inputs_for_generation` function for model_class in self.all_generative_model_classes: config, input_ids, _, _ = self._get_input_ids_and_config() # Ignore: # a) eos (to always output 20 tokens) and pad (so we don't try to infer the attn mask from the input_ids, # which would cause a mismatch), config.pad_token_id = config.eos_token_id = -1 # b) embedding scaling, the scaling factor applied after embedding from input_ids (requires knowledge of the # variable that holds the scaling factor, which is model-dependent) if hasattr(config, "scale_embedding"): config.scale_embedding = False # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the # decoder) if config.is_encoder_decoder: continue # Skip models without explicit support model = model_class(config).to(torch_device).eval() if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys(): continue # Traditional way of generating text outputs_from_ids = model.generate(input_ids) self.assertEqual(outputs_from_ids.shape, (2, 20)) # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output) inputs_embeds = model.get_input_embeddings()(input_ids) outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds) self.assertListEqual(outputs_from_ids.tolist(), outputs_from_embeds.tolist()) # But if we pass different inputs_embeds, we should get different outputs torch.manual_seed(0) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate(input_ids, inputs_embeds=random_embeds) with self.assertRaises(AssertionError): self.assertListEqual(outputs_from_rand_embeds.tolist(), outputs_from_embeds.tolist()) # input_ids is not a required input -- if we don't pass it, the newly generated tokens will be the same outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, max_new_tokens=20 - inputs_embeds.shape[1] ) self.assertListEqual( outputs_from_embeds[:, inputs_embeds.shape[1] :].tolist(), outputs_from_embeds_wo_ids[:, 1:].tolist(), ) def test_generate_continue_from_past_key_values(self): # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]): self.skipTest("Won't fix: old model with unique inputs/caches/other") if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]): self.skipTest("TODO: needs modeling or test input preparation fixes for compatibility") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") # Let's make
it always: # 1. use cache (for obvious reasons) # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the # continuation would force it to generate beyond an EOS token) # 3. ignore `token_type_ids` for simplicity # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is # active by default on some models config.use_cache = True if "token_type_ids" in inputs: del inputs["token_type_ids"] model = model_class(config).to(torch_device) model.eval() model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format) outputs = model(**inputs) if "past_key_values" not in outputs: self.skipTest("This model doesn't return `past_key_values`") # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True) # Continue from the tokens generated above, preparing the inputs accordingly inputs["past_key_values"] = outputs_cached.past_key_values new_attention_len = outputs_cached.sequences.shape[-1] if config.is_encoder_decoder: inputs["decoder_input_ids"] = outputs_cached.sequences if "decoder_attention_mask" in inputs: inputs["decoder_attention_mask"] = torch.nn.functional.pad( inputs["decoder_attention_mask"], (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]), mode="constant", value=1, ) else: inputs["input_ids"] = outputs_cached.sequences if "attention_mask" in inputs: inputs["attention_mask"] = torch.nn.functional.pad( inputs["attention_mask"], (0, new_attention_len - inputs["attention_mask"].shape[1]), mode="constant", value=1, ) outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True) # The two sets of generated text and past kv should be equal to each other self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist()) for layer_idx in range(len(outputs_cached.past_key_values)): for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], outputs_cached.past_key_values[layer_idx][kv_idx], ) ) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first 
input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Past Key Value States -- three notes here: # 1. Its inner sequence length is with respect to the inputs of the latest forward pass, hence the "-1" # 2. Some old models still return `output.past_key_values` even without `use_cache=True` # 3. TODO (joao): A few models have different formats, skipping those until the cache refactor is complete models_without_standard_cache = ("bloom", "ctrl", "fsmt", "gptbigcode", "mega", "reformer") has_standard_cache = not any( model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache ) if use_cache and has_standard_cache: past_key_values = output.past_key_values past_sequence_length = output.sequences.shape[-1] - 1 self._check_past_key_values_for_generate( num_sequences_in_output, past_key_values, seq_length=past_sequence_length, config=config, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config, num_beam_groups=1): self.assertIsInstance(past_key_values, tuple) self.assertListEqual( [isinstance(iter_past_key_values, tuple) for iter_past_key_values in past_key_values], [True] * len(past_key_values), ) # (batch, head, seq_length, head_features) expected_shape = ( batch_size * num_beam_groups, config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads, seq_length, config.hidden_size // config.num_attention_heads, ) # check shape key, value self.assertListEqual( [layer_past_key_values[0].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) self.assertListEqual( [layer_past_key_values[1].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 
20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_torch_available(): framework_dependent_parameters = { "AutoModelForCausalLM": AutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": AutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM, "AutoModelForVision2Seq": AutoModelForVision2Seq, "LogitsProcessorList": LogitsProcessorList, "MinLengthLogitsProcessor": MinLengthLogitsProcessor, "create_tensor_fn": torch.tensor, "floats_tensor": floats_tensor, "return_tensors": "pt", } @slow def test_diverse_beam_search(self): # PT-only test: TF doesn't have a diverse beam search implementation article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. "Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. 
It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, 
device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria & group beam search article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search 
diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_custom_stopping_criteria_overload_error(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_generate_non_nlp_input_ids_as_kwarg(self): # PT-only test: AFAIK there's no non-NLP model architecture in TF that supports `input_ids` as its only input model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_values_as_encoder_kwarg(self): # PT-only test: AFAIK there's no generate-capable architecture in TF that supports `input_values` as its input input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) 
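# Note on the mechanism being tested: `generate()` resolves its first positional
# argument through the model's `main_input_name` ("input_values" for speech
# encoder-decoders), so the positional and keyword calls below are expected to
# produce identical sequences.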
output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): # PT-only test: TF doesn't have group beam search articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, diversity_penalty=1.0, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_beam_search_example_integration(self): # PT-only test: TF doesn't have a BeamSearchScorer # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = 
tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_cfg_mixin(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") input = tokenizer(["The dragon flew over Paris,"], return_tensors="pt", return_attention_mask=True) input["input_ids"] = input["input_ids"].to(torch_device) input["attention_mask"] = input["attention_mask"].to(torch_device) outputs = model.generate(**input, max_new_tokens=32, guidance_scale=1.5) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The dragon flew over Paris, landing in the Rue de la Bastille. 
The crowd was so excited " 'that they had to leave the city.\n\n"We\'re going to Paris!"\n' ], ) neg = tokenizer(["France,"], return_tensors="pt", return_attention_mask=True) neg["input_ids"] = neg["input_ids"].to(torch_device) neg["attention_mask"] = neg["attention_mask"].to(torch_device) outputs = model.generate( **input, max_new_tokens=32, guidance_scale=1.5, negative_prompt_ids=neg["input_ids"], negative_prompt_attention_mask=neg["attention_mask"], ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ 'The dragon flew over Paris, landing on the pavement.\n\n"Paris!"\n\n"Paris!"\n\n"' 'Paris!"\n\n"Paris!"\n\n"Paris!"\n\n' ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" 
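# `force_words_ids` accepts two layouts: a list of token-id lists (each inner list is a
# required phrase) or a list of lists of token-id lists (each middle list being a set of
# disjunctive alternatives). The calls below pass tensors, nested tensors, an empty list
# and negative ids instead, so each one is expected to raise a ValueError.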
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # PT-only test: TF doesn't have contrastive search # Tests that contrastive search works with batched inputs (i.e. has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 20 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) # Only some seeds will work both on CPU/GPU for a fixed `expectation` value. # The selected seed is not guaranteed to work on all torch versions.
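# In short: `eos_token_id` may be given as an int or as a list of ints. With the same
# seed, the two runs below sample the same continuation, and since token 846 belongs to
# both stop sets, both runs are expected to stop at the same length (`expectation`).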
torch.manual_seed(1) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(1) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar") def test_default_max_length_warning(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Default generation config value of 20 -> emits warning with self.assertWarns(UserWarning): model.generate(input_ids) # Explicitly setting max_length to 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: model.generate(input_ids, max_length=20) self.assertEqual(len(warning_list), 0) # Generation config max_length != 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: # generation_config is modified -> legacy mode is disabled = generation_config takes precedence model.generation_config.max_length = 10 model.generate(input_ids) self.assertEqual(len(warning_list), 0) def test_model_kwarg_assisted_decoding_decoder_only(self): # PT-only test: TF doesn't support assisted decoding yet. 
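# What this checks, in short: arbitrary model kwargs (here `token_type_ids`) must be
# forwarded both to the assistant's drafting passes and to the main model's verification
# pass. Sketch of the expected equivalence (`tti` below is shorthand for the zeros
# tensor built in the test, not a variable defined here):
#     model.generate(input_ids, token_type_ids=tti)
#     model.generate(input_ids, token_type_ids=tti, assistant_model=assistant)
# must produce token-for-token identical outputs.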
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with token_type_ids outputs_tti = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), ) with self.assertRaises(AssertionError): self.assertListEqual(outputs_tti.tolist(), outputs_normal.tolist()) # Assistant model assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) assistant.config.pad_token_id = tokenizer.eos_token_id # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_tti.tolist()) def test_model_kwarg_assisted_decoding_encoder_decoder(self): # PT-only test: TF doesn't support assisted decoding yet. # Bart subclass with a kwarg that distorts the output class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=False, **kwargs): outs = super().forward(input_ids, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate( input_ids, foo=True, ) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = AutoModelForSeq2SeqLM.from_pretrained( "hf-internal-testing/tiny-random-BartForConditionalGeneration" ).to(torch_device) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = assistant.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, assistant_encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) def test_assisted_decoding_encoder_decoder_shared_encoder(self): # PT-only test: TF doesn't support assisted decoding yet. 
# Bart subclass with a kwarg called foo that distorts the output class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=False, **kwargs): outs = super().forward(input_ids, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate(input_ids, foo=True) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = BartForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = model.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist())
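# --- Illustrative sketch (not part of the original test file) ---
# A minimal, hedged example of the assisted-decoding path exercised above: a draft
# ("assistant") model proposes tokens and the main model verifies them. The tiny
# checkpoint below is the same one used by the tests; the prompt text is arbitrary.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello world", return_tensors="pt")
# Extra model kwargs (e.g. token_type_ids) are forwarded to both models, which is
# exactly what the assisted-decoding tests above assert.
outputs = model.generate(**inputs, assistant_model=assistant, max_new_tokens=5)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))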
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from parameterized import parameterized from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.generation import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from ..test_modeling_tf_common import ids_tensor @require_tf class TFLogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = tf.ones((batch_size, length), dtype=tf.float32) / length return scores @parameterized.expand([(False,), (True,)]) def test_min_length_dist_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) if use_xla: min_dist_processor = tf.function(min_dist_processor, jit_compile=True) # check that min length is applied at length 5 cur_len = 5 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].numpy().tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 cur_len = 15 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(scores_before_min_length)).numpy()) @parameterized.expand([(False,), (True,)]) def test_temperature_dist_warper(self, use_xla): input_ids = None cur_len = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.numpy() scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch scores = tf.convert_to_tensor(scores) # compute softmax probs = tf.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = TFTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TFTemperatureLogitsWarper(temperature=1.3) if use_xla: temp_dist_warper_sharper = tf.function(temp_dist_warper_sharper, jit_compile=True) temp_dist_warper_smoother = tf.function(temp_dist_warper_smoother, jit_compile=True) warped_prob_sharp = tf.nn.softmax(temp_dist_warper_sharper(input_ids, tf.identity(scores), cur_len), axis=-1) warped_prob_smooth = tf.nn.softmax(temp_dist_warper_smoother(input_ids, tf.identity(scores), cur_len), axis=-1) #
uniform distribution stays uniform tf.debugging.assert_near(probs[0, :], warped_prob_sharp[0, :], atol=1e-3) tf.debugging.assert_near(probs[0, :], warped_prob_smooth[0, :], atol=1e-3) # sharp peaks get higher, valleys get lower self.assertLess(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_sharp[1, :])) # smooth peaks get lower, valleys get higher self.assertGreater(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_smooth[1, :])) self.assertLess(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_smooth[1, :])) @parameterized.expand([(False,), (True,)]) def test_repetition_penalty_dist_process(self, use_xla): vocab_size = 10 cur_len = 2 input_ids = tf.constant([[0, 1], [5, 0]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size=2, length=vocab_size) mask = tf.cast(tf.constant([[1] + 9 * [0], 10 * [0]]), tf.bool) scores = tf.where(mask, -1 / vocab_size, scores) mask = tf.cast(tf.constant([10 * [0], 5 * [0] + [1] + 4 * [0]]), tf.bool) scores = tf.where(mask, 4 / vocab_size, scores) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) if use_xla: rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) scores = rep_penalty_proc(input_ids, tf.identity(scores), cur_len) # check that values were correctly changed (negative scores for used tokens should increase, others # should decrease) self.assertAlmostEqual(scores[0, 0].numpy(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change self.assertAlmostEqual(scores[1, 0].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].numpy(), (4 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change @parameterized.expand([(False,), (True,)]) def test_top_k_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size, dtype=np.float32), (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TFTopKLogitsWarper(3) if use_xla: top_k_warp = tf.function(top_k_warp, jit_compile=True) scores = top_k_warp(input_ids, ramp_logits, cur_len) # check that correct tokens are filtered self.assertListEqual(tf.math.is_inf(scores[0]).numpy().tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(tf.math.is_inf(scores[1]).numpy().tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TFTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) if use_xla: top_k_warp_safety_check = tf.function(top_k_warp_safety_check, jit_compile=True) scores = top_k_warp_safety_check(input_ids, logits, cur_len) # uniform dist is not changed self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [0, 0]) ramp_logits = np.broadcast_to(np.arange(length, dtype=np.float32), (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), 
axis=-1).numpy().tolist(), [2, 2]) @parameterized.expand([(False,), (True,)]) def test_top_p_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TFTopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) # top_p should have been 0.8 to test the edge case of top_p being exactly equal to sum of some token prob # However, due to the numerical instability of softmax in TF we choose this as the edge case # top_p as 0.8 passes when use_xla is True and fails when False. Refer PR #18984. top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to( np.arange(vocab_size, dtype=np.float32)[None, :], (batch_size, vocab_size) ).copy() - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = TFTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps # 2. self.assertListEqual( tf.math.reduce_sum(tf.where(filtered_dist != 0.0, 1, 0), axis=-1).numpy().tolist(), [3, 2] ) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 cur_len = 4 input_ids = tf.constant([[1, 1, 2, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = TFNoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = TFNoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, tf.identity(scores), cur_len) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, tf.identity(scores), cur_len) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_2_gram).numpy().tolist(), [[False, True, True], [True, False, False]] ) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_3_gram).numpy().tolist(), [[False, False, False], [True, False, False]] ) @parameterized.expand([(False,), (True,)]) def test_no_bad_words_dist_processor(self, use_xla): vocab_size = 5 batch_size = 2 eos_token_id = 4 cur_len = 4 input_ids = tf.constant([[0, 1, 3, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) if use_xla: no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) filtered_scores = no_bad_words_dist_proc(input_ids, tf.identity(scores), cur_len) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 
1, 2) token are forbidden self.assertListEqual( tf.math.is_inf(filtered_scores).numpy().tolist(), [[True, True, False, True, True], [True, True, True, False, True]], ) @parameterized.expand([(False,), (True,)]) def test_forced_bos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the bos_token_id score cur_len = 1 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, bos_token_id + 1 :]) & (scores[:, bos_token_id + 1 :] < 0)) ) self.assertListEqual(scores[:, bos_token_id].numpy().tolist(), 4 * [0]) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_forced_eos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = TFForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the eos_token_id when max_length-1 is reached cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, eos_token_id + 1 :]) & (scores[:, eos_token_id + 1 :] < 0)) ) self.assertListEqual( scores[:, eos_token_id].numpy().tolist(), 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length-1 is not reached cur_len = 3 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_at_begin_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 begin_suppress_tokens = [1, 2, 3] begin_index = 5 logits_processor = TFSuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index ) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that no scores are suppressed if begin_index is not reached cur_len = 4 input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) # Check that scores are suppressed if begin_index is reached cur_len = 5 input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len)
self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 suppress_tokens = [1, 3, 5] keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that suppress_tokens are suppressed and others are not cur_len = 5 input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_force_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 force_token_map = {1: 2, 3: 2} logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that if the cur_len is contained in the force_token_map, the logits are the same # for all tokens except the one the force_token_map points to cur_len = 1 input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] self.assertTrue( tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, [non_forced_inds], axis=1))), ) # check that if the cur_len is not contained in the force_token_map, the logits are not modified cur_len = 2 input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): # TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor when it gets compatible with XLA batch_size = 4 cur_len = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores input_ids = ids_tensor((batch_size, cur_len), vocab_size) input_ids_comp = tf.identity(input_ids) scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = tf.identity(scores) # instantiate all dist processors min_dist_proc = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TFTemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TFTopKLogitsWarper(3) top_p_warp = TFTopPLogitsWarper(0.8) # no_repeat_proc = TFNoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) if use_xla: min_dist_proc = tf.function(min_dist_proc, jit_compile=True) temp_dist_warp = tf.function(temp_dist_warp, jit_compile=True) rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) top_k_warp = tf.function(top_k_warp, jit_compile=True) top_p_warp = tf.function(top_p_warp, jit_compile=True) # 
no_repeat_proc = tf.function(no_repeat_proc, jit_compile=True) no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) # no processor list scores = min_dist_proc(input_ids, scores, cur_len) scores = temp_dist_warp(input_ids, scores, cur_len) scores = rep_penalty_proc(input_ids, scores, cur_len) scores = top_k_warp(input_ids, scores, cur_len) scores = top_p_warp(input_ids, scores, cur_len) # scores = no_repeat_proc(input_ids, scores, cur_len) scores = no_bad_words_dist_proc(input_ids, scores, cur_len) # with processor list processor = TFLogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, # no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp, cur_len) # remove inf scores = tf.where(tf.math.is_inf(scores), -1e9, scores) scores_comp = tf.where(tf.math.is_inf(scores_comp), -1e9, scores_comp) # scores should be equal tf.debugging.assert_near(scores, scores_comp, atol=1e-3) # input_ids should never be changed self.assertListEqual(input_ids.numpy().tolist(), input_ids_comp.numpy().tolist())
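# --- Illustrative sketch (not part of the original test file) ---
# How the TF processors above are typically chained outside of `generate()`. The shapes
# and token ids below are arbitrary; `cur_len` is passed explicitly, which is what keeps
# these processors XLA-compilable.
import tensorflow as tf
from transformers.generation import TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFTopKLogitsWarper

batch_size, vocab_size, cur_len = 2, 15, 5
input_ids = tf.random.uniform((batch_size, cur_len), maxval=vocab_size, dtype=tf.int32)
scores = tf.ones((batch_size, vocab_size)) / vocab_size
processors = TFLogitsProcessorList(
    [TFMinLengthLogitsProcessor(min_length=10, eos_token_id=0), TFTopKLogitsWarper(3)]
)
filtered_scores = processors(input_ids, scores, cur_len)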
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_utils.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxGenerationTesterMixin: model_tester = None all_generative_model_classes = () def _get_input_ids_and_config(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = inputs["input_ids"].shape[-1] // 2 input_ids = inputs["input_ids"][:max_batch_size, :sequence_length] attention_mask = jnp.ones_like(input_ids) attention_mask = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens max_length = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def test_greedy_generate_pt_fx(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.decoder_start_token_id = 0 for model_class in self.all_generative_model_classes: flax_model = model_class(config) pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params) flax_generation_outputs = flax_model.generate(input_ids).sequences pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def test_greedy_generate(self): config, input_ids, _, 
max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_num_return_sequences(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 config.num_return_sequences = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def test_sample_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length config.temperature = 0.8 config.top_k = 10 config.top_p = 0.3 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.num_beams = 2 config.min_length = 1 config.forced_bos_token_id = 8 
config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.num_beams = 2 config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class FlaxGenerationIntegrationTests(unittest.TestCase): def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_logits_process.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = jnp.ones((batch_size, length)) / length return scores def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch scores = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch # compute softmax probs = jax.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1) warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = FlaxTopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits, cur_len=None) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special case length = 5 top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores ==
0.0).sum(axis=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) top_p_warp = FlaxTopPLogitsWarper(0.8) filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2]) def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 20), vocab_size=20) cur_len = 5 scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 scores = self._get_uniform_logits(batch_size, vocab_size) cur_len = 15 scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores_before_min_length).any()) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) # check that all scores are -inf except the bos_token_id score input_ids = ids_tensor((batch_size, 1), vocab_size=20) cur_len = 1 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) # check that all scores are -inf except the eos_token_id when max_length is reached input_ids = ids_tensor((batch_size, 4), vocab_size=20) cur_len = 4 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) # with processor list processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores_comp = processor(input_ids, scores_comp, cur_len=cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_processor_list_jitted(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list def run_no_processor_list(input_ids, scores, cur_len): scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) return scores # with processor list def run_processor_list(input_ids, scores, cur_len): processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores = processor(input_ids, scores, cur_len=cur_len) return scores jitted_run_no_processor_list = jax.jit(run_no_processor_list) jitted_run_processor_list = jax.jit(run_processor_list) scores = 
jitted_run_no_processor_list(input_ids, scores, cur_len) scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
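# --- Illustrative sketch (not part of the original test file) ---
# A minimal custom processor following the same `(input_ids, scores, cur_len)` interface
# the tests above rely on. The class name and the blocked token id are made up for
# illustration only.
import jax.numpy as jnp
from transformers.generation import FlaxLogitsProcessor

class FlaxBlockTokenLogitsProcessor(FlaxLogitsProcessor):
    """Sets the score of a single token id to -inf at every generation step."""

    def __init__(self, blocked_token_id: int):
        self.blocked_token_id = blocked_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        return scores.at[:, self.blocked_token_id].set(-float("inf"))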
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_constraints.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): # We can't have constraints that are complete subsets of another. This leads to a perverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) # fails here def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) # Completed! self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
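# --- Illustrative sketch (not part of the original test file) ---
# How a DisjunctiveConstraint is usually consumed: passed to constrained beam search via
# `generate(constraints=...)`. The checkpoint and word list below are placeholders; any
# model generating with num_beams > 1 works the same way.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids
# Any one of these tokenized surface forms satisfies the constraint.
flexible_phrases = tokenizer(["alt", "älter"], add_special_tokens=False).input_ids
outputs = model.generate(
    input_ids, constraints=[DisjunctiveConstraint(flexible_phrases)], num_beams=4, max_new_tokens=20
)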
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeq2SeqLM, TFAutoModelForSpeechSeq2Seq, TFAutoModelForVision2Seq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p_filtering function behaves as expected def test_top_k_top_p_filtering(self): logits = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cumulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx.
27 -5.89478553, 1.85370467, ], # cumulative prob of 5 highest values <= 0.6 ], dtype=tf.float32, ) non_inf_expected_idx = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, ) # expected non filtered idx as noted above non_inf_expected_output = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, ) # expected non filtered values as noted above output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")] non_inf_idx = tf.cast( tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32, ) tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12) tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx) @require_tf class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): framework_dependent_parameters = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM, "AutoModelForVision2Seq": TFAutoModelForVision2Seq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def test_generate_tf_function_export_fixed_input_length(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_length = 2 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((None, input_length), tf.int32, name="input_ids"), tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2, 0], [102, 103]] dummy_attention_masks = [[1, 0], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for batch_size in range(1, len(dummy_input_ids) + 1): inputs = { "input_ids": tf.constant(dummy_input_ids[:batch_size]), "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow def test_generate_tf_function_export_fixed_batch_size(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") batch_size = 1 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"), tf.TensorSpec((batch_size, None), tf.int32,
name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2], [102, 103]] dummy_attention_masks = [[1], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for input_row in range(len(dummy_input_ids)): inputs = { "input_ids": tf.constant([dummy_input_ids[input_row]]), "attention_mask": tf.constant([dummy_attention_masks[input_row]]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow @require_tensorflow_text def test_generate_tf_function_export_with_tf_tokenizer(self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir) class CompleteSentenceTransformer(tf.keras.layers.Layer): def __init__(self): super().__init__() self.tokenizer = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read() ) self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") def call(self, inputs, *args, **kwargs): tokens = self.tokenizer.tokenize(inputs) input_ids, attention_mask = text.pad_model_inputs( tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id ) outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask) return self.tokenizer.detokenize(outputs) complete_model = CompleteSentenceTransformer() inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs") outputs = complete_model(inputs) keras_model = tf.keras.Model(inputs, outputs) keras_model.save(tmp_dir) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has PT equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 14 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="tf") model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") eos_token_id = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [638, 198] with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has PT equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="tf").input_ids bart_model = 
TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart") output = bart_model.generate(input_ids).numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(TFBartForConditionalGeneration): def call(self, input_ids, foo=None, **kwargs): return super().call(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart") fake_output = bart_model.generate(input_ids, foo="bar").numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def call(self, input_ids, **kwargs): return super().call(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).numpy() with self.assertRaises(ValueError): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
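# --- Illustrative sketch (not part of the original test file) ---
# Standalone use of `tf_top_k_top_p_filtering`, the helper checked in UtilsFunctionsTest
# above; the logits values here are arbitrary.
import tensorflow as tf
from transformers import tf_top_k_top_p_filtering

logits = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]])
# Keep only the two highest-scoring tokens; every other logit becomes -inf.
filtered_logits = tf_top_k_top_p_filtering(logits, top_k=2, top_p=1.0)
probs = tf.nn.softmax(filtered_logits, axis=-1)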
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_stopping_criteria.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class StoppingCriteriaTestCase(unittest.TestCase): def _get_tensors(self, length): batch_size = 3 vocab_size = 250 input_ids = ids_tensor((batch_size, length), vocab_size) scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return input_ids, scores def test_list_criteria(self): input_ids, scores = self._get_tensors(5) criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ] ) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_length_criteria(self): criteria = MaxLengthCriteria(max_length=10) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_new_tokens_criteria(self): criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) criteria_list = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length, 10) def test_max_time_criteria(self): input_ids, scores = self._get_tensors(5) criteria = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(input_ids, scores)) criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(input_ids, scores)) def test_validate_stopping_criteria(self): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10) with self.assertWarns(UserWarning): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11) stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11) self.assertEqual(len(stopping_criteria), 1)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_search.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): # check too many eos tokens beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) # check all batches are done beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) # beam scorer should be done self.parent.assertTrue(beam_scorer.is_done) # check beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outputs # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first
next_scores[0, 0] = 0.0 beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = 
ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) 
output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outputs # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 # for testing finalize, we do want to have fulfilled constraints stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0,
-1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/peft_integration/test_peft_integration.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, OPTForCausalLM from transformers.testing_utils import require_peft, require_torch, require_torch_gpu, slow, torch_device from transformers.utils import is_torch_available if is_torch_available(): import torch @require_peft @require_torch class PeftTesterMixin: peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",) transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",) transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM) # TODO: run it with CI after PEFT release. @slow class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin): """ A testing suite that makes sure that the PeftModel class is correctly integrated into the transformers library. """ def _check_lora_correctly_converted(self, model): """ Utility method to check if the model has correctly adapters injected on it. """ from peft.tuners.tuners_utils import BaseTunerLayer is_peft_loaded = False for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break return is_peft_loaded def test_peft_from_pretrained(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This checks if we pass a remote folder that contains an adapter config and adapter weights, it should correctly load a model that has adapters injected on it. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) self.assertTrue(peft_model._hf_peft_config_loaded) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_state_dict(self): """ Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains the expected keys. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) state_dict = peft_model.get_adapter_state_dict() for key in state_dict.keys(): self.assertTrue("lora" in key) def test_peft_save_pretrained(self): """ Test that checks various combinations of `save_pretrained` with a model that has adapters loaded on it. This checks if the saved model contains the expected files (adapter weights and adapter config). 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("config.json" not in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) def test_peft_enable_disable_adapters(self): """ A test that checks if `enable_adapters` and `disable_adapters` methods work as expected. """ from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) peft_model.add_adapter(peft_config) peft_logits = peft_model(dummy_input).logits peft_model.disable_adapters() peft_logits_disabled = peft_model(dummy_input).logits peft_model.enable_adapters() peft_logits_enabled = peft_model(dummy_input).logits self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12)) self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12)) def test_peft_add_adapter(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_add_adapter_from_pretrained(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained)) def test_peft_add_adapter_modules_to_save(self): """ Simple test that tests if `add_adapter` works as expected when training with modules to save. 
""" from peft import LoraConfig from peft.utils import ModulesToSaveWrapper for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"]) model.add_adapter(peft_config) self._check_lora_correctly_converted(model) _has_modules_to_save_wrapper = False for name, module in model.named_modules(): if isinstance(module, ModulesToSaveWrapper): _has_modules_to_save_wrapper = True self.assertTrue(module.modules_to_save.default.weight.requires_grad) self.assertTrue("lm_head" in name) break self.assertTrue(_has_modules_to_save_wrapper) state_dict = model.get_adapter_state_dict() self.assertTrue("lm_head.weight" in state_dict.keys()) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for _, param in model.named_parameters(): if param.requires_grad: self.assertTrue(param.grad is not None) def test_peft_add_adapter_training_gradient_checkpointing(self): """ Simple test that tests if `add_adapter` works as expected when training with gradient checkpointing. """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # When attaching adapters the input embeddings will stay frozen, this will # lead to the output embedding having requires_grad=False. dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(frozen_output.requires_grad is False) model.gradient_checkpointing_enable() # Since here we attached the hook, the input should have requires_grad to set # properly non_frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(non_frozen_output.requires_grad is True) # To repro the Trainer issue dummy_input.requires_grad = False for name, param in model.named_parameters(): if "lora" in name.lower(): self.assertTrue(param.requires_grad) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for name, param in model.named_parameters(): if param.requires_grad: self.assertTrue("lora" in name.lower()) self.assertTrue(param.grad is not None) def test_peft_add_multi_adapter(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected in multi-adapter setting. 
""" from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: is_peft_loaded = False model = transformers_class.from_pretrained(model_id).to(torch_device) logits_original_model = model(dummy_input).logits peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) logits_adapter_1 = model(dummy_input) model.add_adapter(peft_config, adapter_name="adapter-2") logits_adapter_2 = model(dummy_input) for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break self.assertTrue(is_peft_loaded) # dummy generation _ = model.generate(input_ids=dummy_input) model.set_adapter("default") self.assertTrue(model.active_adapters() == ["default"]) self.assertTrue(model.active_adapter() == "default") model.set_adapter("adapter-2") self.assertTrue(model.active_adapters() == ["adapter-2"]) self.assertTrue(model.active_adapter() == "adapter-2") # Logits comparison self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)) model.set_adapter(["adapter-2", "default"]) self.assertTrue(model.active_adapters() == ["adapter-2", "default"]) self.assertTrue(model.active_adapter() == "adapter-2") logits_adapter_mixed = model(dummy_input) self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse( torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) # multi active adapter saving not supported with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) @require_torch_gpu def test_peft_from_pretrained_kwargs(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs and see if the integraiton behaves as expected. 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) @require_torch_gpu def test_peft_save_quantized(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models """ # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) @require_torch_gpu def test_peft_save_quantized_regression(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models Regression test to make sure everything works as expected before the safetensors integration. 
""" # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) def test_peft_pipeline(self): """ Simple test that tests the basic usage of PEFT model + pipeline """ from transformers import pipeline for model_id in self.peft_test_model_ids: pipe = pipeline("text-generation", model_id) _ = pipe("Hello") def test_peft_add_adapter_with_state_dict(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected with a state_dict being passed. 
""" from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) with self.assertRaises(ValueError): model.load_adapter(peft_model_id=None) state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") dummy_state_dict = torch.load(state_dict_path) model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config) with self.assertRaises(ValueError): model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None)) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=dummy_input) def test_peft_from_pretrained_hub_kwargs(self): """ Tests different combinations of PEFT model + from_pretrained + hub kwargs """ peft_model_id = "peft-internal-testing/tiny-opt-lora-revision" # This should not work with self.assertRaises(OSError): _ = AutoModelForCausalLM.from_pretrained(peft_model_id) adapter_kwargs = {"revision": "test"} # This should work model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model))
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/preprocessor_config.json
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/merges.txt
#version: 0.2 Ġ l Ġl o Ġlo w e r
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/sample_text_no_unicode.txt
Text should be one-sentence-per-line, with empty lines between documents. This sample text is public domain and was randomly selected from Project Guttenberg. The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. "Cass" Beard had risen early that morning, but not with a view to discovery. A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. This was nearly opposite. Mr. Cassius crossed the highway, and stopped suddenly. Something glittered in the nearest red pool before him. Gold, surely! But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. Looking at it more attentively, he saw that it bore the inscription, "May to Cass." Like most of his fellow gold-seekers, Cass was superstitious. The fountain of classic wisdom, Hypatia herself. As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. From my youth I felt in me a soul above the matter-entangled herd. She revealed to me the glorious fact, that I am a spark of Divinity itself. A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. There is a philosophic pleasure in opening one's treasures to the modest young. Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. 
His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. At last they reached the quay at the opposite end of the street; and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/dummy_feature_extractor_config.json
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/vocab.json
{"l": 0, "o": 1, "w": 2, "e": 3, "r": 4, "s": 5, "t": 6, "i": 7, "d": 8, "n": 9, "Ġ": 10, "Ġl": 11, "Ġn": 12, "Ġlo": 13, "Ġlow": 14, "er": 15, "Ġlowest": 16, "Ġnewer": 17, "Ġwider": 18, "<unk>": 19, "<|endoftext|>": 20}
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/vocab.txt
[PAD] [SEP] [MASK] [CLS] [unused3] [unused4] [unused5] [unused6] [unused7] [unused8]
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/add_distilbert_like_config.json
{ "add_copied_from": true, "old_model_type": "distilbert", "new_model_patterns": { "model_name": "BERT New", "checkpoint": "huggingface/bert-new-base", "model_type": "bert-new", "model_lower_cased": "bert_new", "model_camel_cased": "BertNew", "model_upper_cased": "BERT_NEW", "config_class": "BertNewConfig", "tokenizer_class": "DistilBertTokenizer" }, "frameworks": [ "pt", "tf", "flax" ] }
0