| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_text_generation.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, pipeline
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from .test_pipelines_common import ANY, PipelineTestCaseMeta
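# The tests below exercise the text-generation pipeline: small deterministic checks
# against a tiny checkpoint for each framework, plus the get_test_pipeline /
# run_pipeline_test hooks that PipelineTestCaseMeta uses to generate per-architecture
# tests from the model mappings.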
@is_pipeline_test
class TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def test_small_model_pt(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope. oscope. FiliFili@@"
}
],
)
outputs = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
outputs,
[
[
{
"generated_text": "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope. oscope. FiliFili@@"
}
],
[
{
"generated_text": "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope. oscope. FiliFili@@"
}
],
],
)
@require_tf
def test_small_model_tf(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs,
[
{
"generated_text": "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵 please,"
}
],
)
outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
self.assertEqual(
outputs,
[
[
{
"generated_text": "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵 please,"
}
],
[
{
"generated_text": "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵 please,"
}
],
],
)
def get_test_pipeline(self, model, tokenizer, feature_extractor):
text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
return text_generator, ["This is a test", "Another test"]
def run_pipeline_test(self, text_generator, _):
model = text_generator.model
tokenizer = text_generator.tokenizer
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
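# return_full_text=False strips the prompt from the returned text; it can be set
# either per call or once at pipeline construction time.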
outputs = text_generator("This is a test", return_full_text=False)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
outputs = text_generator("This is a test")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertNotIn("This is a test", outputs[0]["generated_text"])
outputs = text_generator("This is a test", return_full_text=True)
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
if text_generator.tokenizer.pad_token is not None:
outputs = text_generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
# An empty prompt is slightly special: it requires a BOS token to exist.
# Special case for Pegasus, which always appends EOS, so it works even
# without BOS.
if text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__:
outputs = text_generator("")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
else:
with self.assertRaises((ValueError, AssertionError)):
outputs = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling long
# generation with only max_length would require awkward bookkeeping,
# so these checks are skipped for now.
return
# Models with an effectively unlimited context window already handle long
# prompts, so they are not covered here.
# Skip this test for XGLM too, since it uses sinusoidal positional embeddings
# which are resized on-the-fly.
if tokenizer.model_max_length < 10000 and "XGLM" not in tokenizer.__class__.__name__:
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 500, max_new_tokens=20)
outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
# The hole strategy cannot help when max_new_tokens itself exceeds the model's maximum length
with self.assertRaises(ValueError):
text_generator(
"This is a test" * 500,
handle_long_generation="hole",
max_new_tokens=tokenizer.model_max_length + 10,
)
| 7,361 | 42.56213 | 172 | py |
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_table_question_answering.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
AutoModelForTableQuestionAnswering,
AutoTokenizer,
TableQuestionAnsweringPipeline,
TFAutoModelForTableQuestionAnswering,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_pandas,
require_tensorflow_probability,
require_tf,
require_torch,
require_torch_scatter,
slow,
)
from .test_pipelines_common import PipelineTestCaseMeta
@is_pipeline_test
class TQAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
# Included here for consistency, but TQA models do not have a fast tokenizer,
# which is required to generate the automatic tests
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
@require_tensorflow_probability
@require_pandas
@require_tf
@require_torch
def test_small_model_tf(self):
model_id = "lysandre/tiny-tapas-random-wtq"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
self.assertIsInstance(model.config.aggregation_labels, dict)
self.assertIsInstance(model.config.no_aggregation_label_index, int)
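# The tiny random checkpoint is untrained, so its predictions are degenerate
# (empty coordinates, AVERAGE aggregator); these checks validate the pipeline
# plumbing and output schema rather than answer quality.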
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_torch
@require_torch_scatter
def test_small_model_pt(self):
model_id = "lysandre/tiny-tapas-random-wtq"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
self.assertIsInstance(model.config.aggregation_labels, dict)
self.assertIsInstance(model.config.no_aggregation_label_index, int)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_torch
@require_torch_scatter
def test_slow_tokenizer_sqa_pt(self):
model_id = "lysandre/tiny-tapas-random-sqa"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
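# sequential=True answers the queries one by one so each prediction can condition
# on the previous answers (the SQA conversational setting); sequential=False runs
# them independently, which is why later answers may differ between the two modes.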
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_tf
@require_tensorflow_probability
@require_pandas
@require_torch
def test_slow_tokenizer_sqa_tf(self):
model_id = "lysandre/tiny-tapas-random-sqa"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
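# The @slow integration tests below use real finetuned checkpoints: the WTQ model
# predicts an aggregator (NONE/COUNT/SUM/AVERAGE) over the selected cells, while
# the SQA model answers a conversational sequence of questions cell by cell.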
@slow
@require_torch_scatter
def test_integration_wtq_pt(self):
table_querier = pipeline("table-question-answering")
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@slow
@require_tensorflow_probability
@require_pandas
def test_integration_wtq_tf(self):
model_id = "google/tapas-base-finetuned-wtq"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer)
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@slow
@require_torch_scatter
def test_integration_sqa_pt(self):
table_querier = pipeline(
"table-question-answering",
model="google/tapas-base-finetuned-sqa",
tokenizer="google/tapas-base-finetuned-sqa",
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
@slow
@require_tensorflow_probability
@require_pandas
def test_integration_sqa_tf(self):
model_id = "google/tapas-base-finetuned-sqa"
model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = pipeline(
"table-question-answering",
model=model,
tokenizer=tokenizer,
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
| 29,310 | 45.159055 | 121 | py |
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_fill_mask.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
@require_tf
def test_small_model_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
],
)
@require_torch
def test_small_model_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
],
)
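# With several mask tokens in a single input, the pipeline returns one list of
# candidates per mask instead of a flat list.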
outputs = unmasker("My name is <mask> <mask>", top_k=2)
self.assertEqual(
nested_simplify(outputs, decimals=6),
[
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
],
)
@slow
@require_torch
def test_large_model_pt(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
self.run_large_test(unmasker)
@slow
@require_tf
def test_large_model_tf(self):
unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
self.run_large_test(unmasker)
def run_large_test(self, unmasker):
outputs = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
],
)
outputs = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(outputs),
[
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
],
)
outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
self.assertEqual(
nested_simplify(outputs),
[
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
],
)
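# Removing the pad token checks that the pipeline still works when the tokenizer
# cannot pad; the default single-example code path should not require padding at all.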
@require_torch
def test_model_no_pad_pt(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
@require_tf
def test_model_no_pad_tf(self):
unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
unmasker.tokenizer.pad_token_id = None
unmasker.tokenizer.pad_token = None
self.run_pipeline_test(unmasker, [])
def get_test_pipeline(self, model, tokenizer, feature_extractor):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
examples = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def run_pipeline_test(self, fill_masker, examples):
tokenizer = fill_masker.tokenizer
model = fill_masker.model
outputs = fill_masker(
f"This is a {tokenizer.mask_token}",
)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
with self.assertRaises(ValueError):
fill_masker([None])
# Passing an input with no mask_token is not supported
with self.assertRaises(PipelineException):
fill_masker("This is")
self.run_test_top_k(model, tokenizer)
self.run_test_targets(model, tokenizer)
self.run_test_top_k_targets(model, tokenizer)
self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
self.fill_mask_with_multiple_masks(model, tokenizer)
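# `targets` restricts the candidates to the given tokens; it can be passed either
# at pipeline construction time or per call, and both paths must give the same result.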
def run_test_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
targets = list(sorted(vocab.keys()))[:2]
# Pipeline argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual(set(el["token"] for el in outputs), target_ids)
self.assertEqual(set(el["token_str"] for el in outputs), set(targets))
# Call argument
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
target_ids = {vocab[el] for el in targets}
self.assertEqual(set(el["token"] for el in outputs), target_ids)
self.assertEqual(set(el["token_str"] for el in outputs), set(targets))
# Score equivalence
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
tokens = [top_mask["token_str"] for top_mask in outputs]
scores = [top_mask["score"] for top_mask in outputs]
unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
target_scores = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
# Raises with invalid targets
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
with self.assertRaises(ValueError):
outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
def run_test_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
outputs = fill_masker(f"This is a {tokenizer.mask_token}")
self.assertEqual(
outputs,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
self.assertEqual(
outputs2,
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
)
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
vocab = tokenizer.get_vocab()
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
# top_k=2, ntargets=3
targets = list(sorted(vocab.keys()))[:3]
outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
# If we use the most probable targets, and filter differently, we should still
# have the same results
targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
# They should yield exactly the same result
self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
vocab = tokenizer.get_vocab()
# String duplicates + id duplicates
targets = list(sorted(vocab.keys()))[:3]
targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
# The target list contains duplicates, so we cannot output more
# candidates than the number of unique targets
self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
outputs = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
)
self.assertEqual(
outputs,
[
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
[
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
{"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
],
],
)
| 17,646 | 44.132992 | 119 | py |
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_question_answering.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
LxmertConfig,
QuestionAnsweringPipeline,
)
from transformers.data.processors.squad import SquadExample
from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class QAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
def get_test_pipeline(self, model, tokenizer, feature_extractor):
if isinstance(model.config, LxmertConfig):
# This is a bimodal (vision + language) model; we need to find a more consistent
# way to handle such models.
return None, None
question_answerer = QuestionAnsweringPipeline(model, tokenizer)
examples = [
{"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
{"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."},
]
return question_answerer, examples
def run_pipeline_test(self, question_answerer, _):
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question="Where was HuggingFace founded ?",
context="HuggingFace was founded in Paris.",
handle_impossible_answer=True,
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
outputs = question_answerer(
question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"],
context="HuggingFace was founded in Paris.",
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
outputs = question_answerer(
question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"],
context=[
"HuggingFace is a startup based in New-York",
"HuggingFace is a startup founded in Paris",
],
)
self.assertEqual(
outputs,
[
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)},
],
)
with self.assertRaises(ValueError):
question_answerer(question="", context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question=None, context="HuggingFace was founded in Paris.")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context="")
with self.assertRaises(ValueError):
question_answerer(question="In what field is HuggingFace working ?", context=None)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", topk=20
)
self.assertEqual(
outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)]
)
# A very long context requires multiple overlapping features (doc-stride windows)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20
)
self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)})
@require_torch
def test_small_model_pt(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
@require_tf
def test_small_model_tf(self):
question_answerer = pipeline(
"question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.011, "start": 0, "end": 11, "answer": "HuggingFace"})
@slow
@require_torch
def test_large_model_pt(self):
question_answerer = pipeline(
"question-answering",
)
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@slow
@require_torch
def test_large_model_issue(self):
qa_pipeline = pipeline(
"question-answering",
model="mrm8488/bert-multi-cased-finetuned-xquadv1",
)
outputs = qa_pipeline(
{
"context": "Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's order from August this year that had remanded him in police custody for a week in a multi-crore loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud case and some related matters being probed by the CBI and Enforcement Directorate. A single bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special court's order permitting the CBI's request for police custody on August 14 was illegal and in breach of the due process of law. Therefore, his police custody and subsequent judicial custody in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special court's order dated August 14. As per his plea, in August this year, the CBI had moved two applications before the special court, one seeking permission to arrest Kapoor, who was already in judicial custody at the time in another case, and the other, seeking his police custody. While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the central agency's plea for his custody. Kapoor, however, said in his plea that before filing an application for his arrest, the CBI had not followed the process of issuing him a notice under Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken prior sanction as mandated under section 17 A of the Prevention of Corruption Act for prosecuting him. The special court, however, had said in its order at the time that as Kapoor was already in judicial custody in another case and was not a free man the procedure mandated under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of appearance was concerned. ADVERTISING It had also said that case records showed that the investigating officer had taken an approval from a managing director of Yes Bank before beginning the proceedings against Kapoor and such a permission was a valid sanction. However, Kapoor in his plea said that the above order was bad in law and sought that it be quashed and set aside. The law mandated that if initial action was not in consonance with legal procedures, then all subsequent actions must be held as illegal, he said, urging the High Court to declare the CBI remand and custody and all subsequent proceedings including the further custody as illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee has stated that she is a resident of the United Kingdom and is unable to travel to India owing to restrictions imposed due to the COVID-19 pandemic. According to the CBI, in the present case, Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was not eligible for the same",
"question": "Is this person invovled in fraud?",
}
)
self.assertEqual(
nested_simplify(outputs),
{"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261},
)
@slow
@require_torch
def test_large_model_course(self):
question_answerer = pipeline("question-answering")
long_context = """
🤗 Transformers: State of the Art NLP
🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction,
question answering, summarization, translation, text generation and more in over 100 languages.
Its aim is to make cutting-edge NLP easier to use for everyone.
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and
then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and
can be modified to enable quick research experiments.
Why should I use transformers?
1. Easy-to-use state-of-the-art models:
- High performance on NLU and NLG tasks.
- Low barrier to entry for educators and practitioners.
- Few user-facing abstractions with just three classes to learn.
- A unified API for using all our pretrained models.
- Lower compute costs, smaller carbon footprint:
2. Researchers can share trained models instead of always retraining.
- Practitioners can reduce compute time and production costs.
- Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages.
3. Choose the right framework for every part of a model's lifetime:
- Train state-of-the-art models in 3 lines of code.
- Move a single model between TF2.0/PyTorch frameworks at will.
- Seamlessly pick the right framework for training, evaluation and production.
4. Easily customize a model or an example to your needs:
- We provide examples for each architecture to reproduce the results published by its original authors.
- Model internals are exposed as consistently as possible.
- Model files can be used independently of the library for quick experiments.
🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration
between them. It's straightforward to train your models with one before loading them for inference with the other.
"""
question = "Which deep learning libraries back 🤗 Transformers?"
outputs = question_answerer(question=question, context=long_context)
self.assertEqual(
nested_simplify(outputs),
{"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892},
)
@slow
@require_tf
def test_large_model_tf(self):
question_answerer = pipeline("question-answering", framework="tf")
outputs = question_answerer(
question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
)
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@is_pipeline_test
class QuestionAnsweringArgumentHandlerTests(unittest.TestCase):
def test_argument_handler(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
normalized = qa(Q, C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=[Q, Q], context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa({"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X=[{"question": Q, "context": C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(data={"question": Q, "context": C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
with self.assertRaises(KeyError):
qa({"context": C})
with self.assertRaises(KeyError):
qa({"question": Q})
with self.assertRaises(KeyError):
qa([{"context": C}])
with self.assertRaises(ValueError):
qa(None, C)
with self.assertRaises(ValueError):
qa("", C)
with self.assertRaises(ValueError):
qa(Q, None)
with self.assertRaises(ValueError):
qa(Q, "")
with self.assertRaises(ValueError):
qa(question=None, context=C)
with self.assertRaises(ValueError):
qa(question="", context=C)
with self.assertRaises(ValueError):
qa(question=Q, context=None)
with self.assertRaises(ValueError):
qa(question=Q, context="")
with self.assertRaises(ValueError):
qa({"question": None, "context": C})
with self.assertRaises(ValueError):
qa({"question": "", "context": C})
with self.assertRaises(ValueError):
qa({"question": Q, "context": None})
with self.assertRaises(ValueError):
qa({"question": Q, "context": ""})
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": None, "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": "", "context": C}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": None}])
with self.assertRaises(ValueError):
qa([{"question": Q, "context": C}, {"question": Q, "context": ""}])
with self.assertRaises(ValueError):
qa(question={"This": "Is weird"}, context="This is a context")
with self.assertRaises(ValueError):
qa(question=[Q, Q], context=[C, C, C])
with self.assertRaises(ValueError):
qa(question=[Q, Q, Q], context=[C, C])
def test_argument_handler_old_format(self):
qa = QuestionAnsweringArgumentHandler()
Q = "Where was HuggingFace founded ?"
C = "HuggingFace was founded in Paris"
# Backward compatibility for this
normalized = qa(question=[Q, Q], context=[C, C])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling_odd(self):
qa = QuestionAnsweringArgumentHandler()
with self.assertRaises(ValueError):
qa(None)
with self.assertRaises(ValueError):
qa(Y=None)
with self.assertRaises(ValueError):
qa(1)
| 18,250 | 49.980447 | 3,155 | py |
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_common.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import random
import string
import unittest
from abc import abstractmethod
from functools import lru_cache
from unittest import skipIf
from transformers import (
FEATURE_EXTRACTOR_MAPPING,
TOKENIZER_MAPPING,
AutoFeatureExtractor,
AutoTokenizer,
DistilBertForSequenceClassification,
IBertConfig,
RobertaConfig,
TextClassificationPipeline,
pipeline,
)
from transformers.pipelines import get_task
from transformers.pipelines.base import _pad
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
logger = logging.getLogger(__name__)
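# Helpers below resolve, for every auto-mapped architecture, a documented checkpoint, a tiny
# config, a tiny tokenizer and a feature extractor so the generated pipeline tests stay cheap.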
def get_checkpoint_from_architecture(architecture):
try:
module = importlib.import_module(architecture.__module__)
except ImportError:
logger.error(f"Ignoring architecture {architecture}")
return
if hasattr(module, "_CHECKPOINT_FOR_DOC"):
return module._CHECKPOINT_FOR_DOC
else:
logger.warning(f"Can't retrieve checkpoint from {architecture.__name__}")
def get_tiny_config_from_class(configuration_class):
if "OpenAIGPT" in configuration_class.__name__:
# This is the only file that is inconsistent with the naming scheme.
# Will rename this file if we decide this is the way to go
return
model_type = configuration_class.model_type
camel_case_model_name = configuration_class.__name__.split("Config")[0]
try:
model_slug = model_type.replace("-", "_")
module = importlib.import_module(f".test_modeling_{model_slug}", package=f"tests.{model_slug}")
model_tester_class = getattr(module, f"{camel_case_model_name}ModelTester", None)
except (ImportError, AttributeError):
logger.error(f"No model tester class for {configuration_class.__name__}")
return
if model_tester_class is None:
logger.warning(f"No model tester class for {configuration_class.__name__}")
return
model_tester = model_tester_class(parent=None)
if hasattr(model_tester, "get_pipeline_config"):
config = model_tester.get_pipeline_config()
elif hasattr(model_tester, "get_config"):
config = model_tester.get_config()
else:
config = None
logger.warning(f"Model tester {model_tester_class.__name__} has no `get_config()`.")
return config
@lru_cache(maxsize=100)
def get_tiny_tokenizer_from_checkpoint(checkpoint):
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
if tokenizer.vocab_size < 300:
# Wav2Vec2ForCTC for instance
# ByT5Tokenizer
# all are already small enough and have no Fast version that can
# be retrained
return tokenizer
logger.info("Training new from iterator ...")
vocabulary = string.ascii_letters + string.digits + " "
tokenizer = tokenizer.train_new_from_iterator(vocabulary, vocab_size=len(vocabulary), show_progress=False)
logger.info("Trained.")
return tokenizer
def get_tiny_feature_extractor_from_checkpoint(checkpoint, tiny_config, feature_extractor_class):
try:
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
except Exception:
try:
if feature_extractor_class is not None:
feature_extractor = feature_extractor_class()
else:
feature_extractor = None
except Exception:
feature_extractor = None
if hasattr(tiny_config, "image_size") and feature_extractor:
feature_extractor = feature_extractor.__class__(size=tiny_config.image_size, crop_size=tiny_config.image_size)
# Speech2TextModel specific.
if hasattr(tiny_config, "input_feat_per_channel") and feature_extractor:
feature_extractor = feature_extractor.__class__(
feature_size=tiny_config.input_feat_per_channel, num_mel_bins=tiny_config.input_feat_per_channel
)
return feature_extractor
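# Wildcard used in output assertions: an ANY(...) instance compares equal to any object of the
# given types, letting tests check the structure of pipeline outputs without pinning exact values.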
class ANY:
def __init__(self, *_types):
self._types = _types
def __eq__(self, other):
return isinstance(other, self._types)
def __repr__(self):
return f"ANY({', '.join(_type.__name__ for _type in self._types)})"
class PipelineTestCaseMeta(type):
def __new__(mcs, name, bases, dct):
def gen_test(ModelClass, checkpoint, tiny_config, tokenizer_class, feature_extractor_class):
@skipIf(tiny_config is None, "TinyConfig does not exist")
@skipIf(checkpoint is None, "checkpoint does not exist")
def test(self):
if ModelClass.__name__.endswith("ForCausalLM"):
tiny_config.is_encoder_decoder = False
if hasattr(tiny_config, "encoder_no_repeat_ngram_size"):
# specific for blenderbot which supports both decoder-only
# encoder/decoder but the test config only reflects
# encoder/decoder arch
tiny_config.encoder_no_repeat_ngram_size = 0
if ModelClass.__name__.endswith("WithLMHead"):
tiny_config.is_decoder = True
try:
model = ModelClass(tiny_config)
except ImportError as e:
self.skipTest(
f"Cannot run with {tiny_config} as the model requires a library that isn't installed: {e}"
)
if hasattr(model, "eval"):
model = model.eval()
if tokenizer_class is not None:
try:
tokenizer = get_tiny_tokenizer_from_checkpoint(checkpoint)
# XLNet actually defines it as -1.
if isinstance(model.config, (RobertaConfig, IBertConfig)):
tokenizer.model_max_length = model.config.max_position_embeddings - 2
elif (
hasattr(model.config, "max_position_embeddings")
and model.config.max_position_embeddings > 0
):
tokenizer.model_max_length = model.config.max_position_embeddings
                    # Rust Panic exceptions are NOT Exception subclasses
                    # Some test tokenizers contain broken vocabs or a custom PreTokenizer, so we
# provide some default tokenizer and hope for the best.
except: # noqa: E722
self.skipTest(f"Ignoring {ModelClass}, cannot create a simple tokenizer")
else:
tokenizer = None
feature_extractor = get_tiny_feature_extractor_from_checkpoint(
checkpoint, tiny_config, feature_extractor_class
)
if tokenizer is None and feature_extractor is None:
self.skipTest(
f"Ignoring {ModelClass}, cannot create a tokenizer or feature_extractor (PerceiverConfig with no FastTokenizer ?)"
)
pipeline, examples = self.get_test_pipeline(model, tokenizer, feature_extractor)
if pipeline is None:
# The test can disable itself, but it should be very marginal
                        # Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer doesn't exist)
return
self.run_pipeline_test(pipeline, examples)
def run_batch_test(pipeline, examples):
                    # Need to copy because `Conversation` objects are stateful
if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None:
return # No batching for this and it's OK
                    # 10 examples with batch size 4 means there needs to be an unfinished batch
# which is important for the unbatcher
def data(n):
for _ in range(n):
# Need to copy because Conversation object is mutated
yield copy.deepcopy(random.choice(examples))
out = []
for item in pipeline(data(10), batch_size=4):
out.append(item)
self.assertEqual(len(out), 10)
run_batch_test(pipeline, examples)
return test
for prefix, key in [("pt", "model_mapping"), ("tf", "tf_model_mapping")]:
mapping = dct.get(key, {})
if mapping:
for configuration, model_architectures in mapping.items():
if not isinstance(model_architectures, tuple):
model_architectures = (model_architectures,)
for model_architecture in model_architectures:
checkpoint = get_checkpoint_from_architecture(model_architecture)
tiny_config = get_tiny_config_from_class(configuration)
tokenizer_classes = TOKENIZER_MAPPING.get(configuration, [])
feature_extractor_class = FEATURE_EXTRACTOR_MAPPING.get(configuration, None)
feature_extractor_name = (
feature_extractor_class.__name__ if feature_extractor_class else "nofeature_extractor"
)
if not tokenizer_classes:
# We need to test even if there are no tokenizers.
tokenizer_classes = [None]
else:
                            # Remove the tokenizers that are not defined
                            # ByT5 and Perceiver are byte-level and don't define a
                            # FastTokenizer, so we can just ignore those.
tokenizer_classes = [
tokenizer_class for tokenizer_class in tokenizer_classes if tokenizer_class is not None
]
for tokenizer_class in tokenizer_classes:
if tokenizer_class is not None:
tokenizer_name = tokenizer_class.__name__
else:
tokenizer_name = "notokenizer"
test_name = f"test_{prefix}_{configuration.__name__}_{model_architecture.__name__}_{tokenizer_name}_{feature_extractor_name}"
if tokenizer_class is not None or feature_extractor_class is not None:
dct[test_name] = gen_test(
model_architecture,
checkpoint,
tiny_config,
tokenizer_class,
feature_extractor_class,
)
@abstractmethod
def inner(self):
raise NotImplementedError("Not implemented test")
# Force these 2 methods to exist
dct["test_small_model_pt"] = dct.get("test_small_model_pt", inner)
dct["test_small_model_tf"] = dct.get("test_small_model_tf", inner)
return type.__new__(mcs, name, bases, dct)
@is_pipeline_test
class CommonPipelineTest(unittest.TestCase):
@require_torch
def test_pipeline_iteration(self):
from torch.utils.data import Dataset
class MyDataset(Dataset):
data = [
"This is a test",
"This restaurant is great",
"This restaurant is awful",
]
def __len__(self):
return 3
def __getitem__(self, i):
return self.data[i]
text_classifier = pipeline(
task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
dataset = MyDataset()
for output in text_classifier(dataset):
self.assertEqual(output, {"label": ANY(str), "score": ANY(float)})
@require_torch
def test_check_task_auto_inference(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertIsInstance(pipe, TextClassificationPipeline)
@require_torch
def test_pipeline_batch_size_global(self):
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
self.assertEqual(pipe._batch_size, None)
self.assertEqual(pipe._num_workers, None)
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1)
self.assertEqual(pipe._batch_size, 2)
self.assertEqual(pipe._num_workers, 1)
@require_torch
def test_pipeline_override(self):
class MyPipeline(TextClassificationPipeline):
pass
text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline)
self.assertIsInstance(text_classifier, MyPipeline)
def test_check_task(self):
task = get_task("gpt2")
self.assertEqual(task, "text-generation")
with self.assertRaises(RuntimeError):
# Wrong framework
get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best")
@require_torch
def test_iterator_data(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
# When using multiple workers on streamable data it should still work
# This will force using `num_workers=1` with a warning for now.
results = []
for out in pipe(data(10), num_workers=2):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_tf
def test_iterator_data_tf(self):
def data(n: int):
for _ in range(n):
yield "This is a test"
pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf")
out = pipe("This is a test")
results = []
for out in pipe(data(10)):
self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
results.append(out)
self.assertEqual(len(results), 10)
@require_torch
def test_unbatch_attentions_hidden_states(self):
model = DistilBertForSequenceClassification.from_pretrained(
"hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert")
text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        # Used to throw an error because `hidden_states` is a tuple of tensors
        # instead of the expected single tensor.
outputs = text_classifier(["This is great !"] * 20, batch_size=32)
self.assertEqual(len(outputs), 20)
@is_pipeline_test
class PipelinePadTest(unittest.TestCase):
@require_torch
def test_pipeline_padding(self):
import torch
items = [
{
"label": "label1",
"input_ids": torch.LongTensor([[1, 23, 24, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 0]]),
},
{
"label": "label2",
"input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]),
"attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]),
},
]
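        # `_pad` collects one key across items and pads the shorter tensors with the given value
        # up to the longest sequence, on the requested side ("right" or "left").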
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "right"),
torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "input_ids", 10, "left"),
torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]),
)
)
self.assertTrue(
torch.allclose(
_pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]])
)
)
@require_torch
def test_pipeline_image_padding(self):
import torch
items = [
{
"label": "label1",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
{
"label": "label2",
"pixel_values": torch.zeros((1, 3, 10, 10)),
},
]
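        # All pixel_values share the same shape, so padding reduces to stacking along the batch dim.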
self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"])
self.assertTrue(
torch.allclose(
_pad(items, "pixel_values", 10, "right"),
torch.zeros((2, 3, 10, 10)),
)
)
@require_torch
def test_pipeline_offset_mapping(self):
import torch
items = [
{
"offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long),
},
{
"offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long),
},
]
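        # Offset mappings hold one (start, end) pair per token; the shorter entry is padded with
        # zeros along the sequence dimension.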
self.assertTrue(
torch.allclose(
_pad(items, "offset_mappings", 0, "right"),
torch.zeros((2, 11, 2), dtype=torch.long),
),
)
@is_pipeline_test
@require_torch
class PipelineUtilsTest(unittest.TestCase):
def test_pipeline_dataset(self):
from transformers.pipelines.pt_utils import PipelineDataset
dummy_dataset = [0, 1, 2, 3]
def add(number, extra=0):
return number + extra
dataset = PipelineDataset(dummy_dataset, add, {"extra": 2})
self.assertEqual(len(dataset), 4)
outputs = [dataset[i] for i in range(4)]
self.assertEqual(outputs, [2, 3, 4, 5])
def test_pipeline_iterator(self):
from transformers.pipelines.pt_utils import PipelineIterator
dummy_dataset = [0, 1, 2, 3]
def add(number, extra=0):
return number + extra
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2})
self.assertEqual(len(dataset), 4)
outputs = [item for item in dataset]
self.assertEqual(outputs, [2, 3, 4, 5])
def test_pipeline_iterator_no_len(self):
from transformers.pipelines.pt_utils import PipelineIterator
def dummy_dataset():
for i in range(4):
yield i
def add(number, extra=0):
return number + extra
dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2})
with self.assertRaises(TypeError):
len(dataset)
outputs = [item for item in dataset]
self.assertEqual(outputs, [2, 3, 4, 5])
def test_pipeline_batch_unbatch_iterator(self):
from transformers.pipelines.pt_utils import PipelineIterator
dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]]}
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
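        # With `loader_batch_size`, `add` is applied to whole loader batches which are then
        # unbatched along "id", yielding one dict per element.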
outputs = [item for item in dataset]
self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}])
def test_pipeline_batch_unbatch_iterator_tensors(self):
import torch
from transformers.pipelines.pt_utils import PipelineIterator
dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}]
def add(number, extra=0):
return {"id": number["id"] + extra}
dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = [item for item in dataset]
self.assertEqual(
nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}]
)
def test_pipeline_chunk_iterator(self):
from transformers.pipelines.pt_utils import PipelineChunkIterator
def preprocess_chunk(n: int):
for i in range(n):
yield i
dataset = [2, 3]
dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3)
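        # The chunk iterator flattens the generator produced for each input item:
        # preprocess_chunk(2) yields 0, 1 and preprocess_chunk(3) yields 0, 1, 2.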
outputs = [item for item in dataset]
self.assertEqual(outputs, [0, 1, 0, 1, 2])
def test_pipeline_pack_iterator(self):
from transformers.pipelines.pt_utils import PipelinePackIterator
def pack(item):
return {"id": item["id"] + 1, "is_last": item["is_last"]}
dataset = [
{"id": 0, "is_last": False},
{"id": 1, "is_last": True},
{"id": 0, "is_last": False},
{"id": 1, "is_last": False},
{"id": 2, "is_last": True},
]
dataset = PipelinePackIterator(dataset, pack, {})
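        # The pack iterator accumulates processed items until it sees `is_last=True`, emitting
        # one list per group (here a group of 2 followed by a group of 3).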
outputs = [item for item in dataset]
self.assertEqual(
outputs,
[
[
{"id": 1},
{"id": 2},
],
[
{"id": 1},
{"id": 2},
{"id": 3},
],
],
)
def test_pipeline_pack_unbatch_iterator(self):
from transformers.pipelines.pt_utils import PipelinePackIterator
dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]}
dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
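        # Same packing behaviour, but the incoming items are loader batches that are unbatched
        # element-wise before being grouped on `is_last`.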
outputs = [item for item in dataset]
self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]])
        # `is_last` stays False across loader batches, so everything is packed into a single group
dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}]
def add(number, extra=0):
return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]}
dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3)
outputs = [item for item in dataset]
self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]])
| 23,340 | 37.326765 | 153 | py |
robust-transformers | robust-transformers-main/tests/pipelines/test_pipelines_audio_classification.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
@require_torch
class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, feature_extractor):
audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=feature_extractor)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return audio_classifier, [audio2, audio]
def run_pipeline_test(self, audio_classifier, examples):
audio2, audio = examples
output = audio_classifier(audio)
# by default a model is initialized with num_labels=2
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
output = audio_classifier(audio, top_k=1)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
],
)
self.run_torchaudio(audio_classifier)
@require_torchaudio
def run_torchaudio(self, audio_classifier):
import datasets
        # test with an actual audio sample (loaded from a dummy dataset)
dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = dataset[0]["audio"]["array"]
output = audio_classifier(audio)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
@require_torch
def test_small_model_pt(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
self.assertEqual(
nested_simplify(output, decimals=4),
[
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
],
)
@require_torch
@slow
def test_large_model_pt(self):
import datasets
model = "superb/wav2vec2-base-superb-ks"
audio_classifier = pipeline("audio-classification", model=model)
dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
audio = np.array(dataset[3]["speech"], dtype=np.float32)
output = audio_classifier(audio, top_k=4)
self.assertEqual(
nested_simplify(output, decimals=3),
[
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
],
)
@require_tf
@unittest.skip("Audio classification is not implemented for TF")
def test_small_model_tf(self):
pass
| 4,140 | 31.606299 | 113 | py |
robust-transformers | robust-transformers-main/tests/byt5/test_tokenization_byt5.py | # coding=utf-8
# Copyright 2020 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.file_utils import cached_property, is_tf_available, is_torch_available
from ..test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = "pt"
elif is_tf_available():
FRAMEWORK = "tf"
else:
FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = ByT5Tokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
tokenizer = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def t5_base_tokenizer(self):
return ByT5Tokenizer.from_pretrained("google/byt5-small")
def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
toks = []
for i in range(len(tokenizer)):
try:
tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
except UnicodeDecodeError:
pass
toks.append((i, tok))
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def test_eos_treatment(self):
tokenizer = self.t5_base_tokenizer
batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
def test_multibytes_char(self):
tokenizer = self.t5_base_tokenizer
src_text = "Unicode €."
encoded = tokenizer(src_text)
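        # ByT5 tokenizes raw UTF-8 bytes, shifting each byte value by 3 to leave room for the
        # pad/eos/unk special tokens: "U" (0x55 = 85) becomes 88, and the trailing 1 is </s>.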
encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], encoded_ids)
# decoding
decoded = tokenizer.decode(encoded_ids)
self.assertEqual(decoded, "Unicode €.</s>")
encoded = tokenizer("e è é ê ë")
encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], encoded_ids)
# decoding
decoded = tokenizer.decode(encoded_ids)
self.assertEqual(decoded, "e è é ê ë</s>")
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
def test_prepare_batch_integration(self):
tokenizer = self.t5_base_tokenizer
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
self.assertIsInstance(batch, BatchEncoding)
if FRAMEWORK != "jax":
result = list(batch.input_ids.numpy()[0])
else:
result = list(batch.input_ids.tolist()[0])
self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 37), batch.input_ids.shape)
self.assertEqual((2, 37), batch.attention_mask.shape)
def test_empty_target_text(self):
tokenizer = self.t5_base_tokenizer
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", batch)
self.assertIn("attention_mask", batch)
self.assertNotIn("decoder_input_ids", batch)
self.assertNotIn("decoder_attention_mask", batch)
def test_max_length_integration(self):
tokenizer = self.t5_base_tokenizer
tgt_text = [
"Summary of the text.",
"Another summary.",
]
with tokenizer.as_target_tokenizer():
targets = tokenizer(
tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
)
self.assertEqual(32, targets["input_ids"].shape[1])
def test_eos_in_input(self):
tokenizer = self.t5_base_tokenizer
src_text = ["A long paragraph for summarization. </s>"]
tgt_text = ["Summary of the text. </s>"]
# fmt: off
expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
batch = tokenizer(src_text)
with tokenizer.as_target_tokenizer():
targets = tokenizer(tgt_text)
self.assertEqual(expected_src_tokens, batch["input_ids"][0])
self.assertEqual(expected_tgt_tokens, targets["input_ids"][0])
    # cannot use the default save_and_load_tokenizer test method because the tokenizer has no vocab
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
self.assertListEqual(before_tokens, after_tokens)
shutil.rmtree(tmpdirname)
tokenizers = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"])
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token")
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
self.assertListEqual(before_tokens, after_tokens)
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(tmpdirname)
# There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens
# We need to add the extra_ids in the list of the arg additional_special_tokens
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir)
with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
special_tokens_map = json.load(json_file)
with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
tokenizer_config = json.load(json_file)
added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
"an_additional_special_token"
]
tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
json.dump(special_tokens_map, outfile)
with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
json.dump(tokenizer_config, outfile)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
tmp_dir,
)
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"],
tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
),
)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir,
additional_special_tokens=new_added_tokens,
)
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
self.assertEqual(
["a_new_additional_special_token"],
tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
),
)
def test_decode_single_bytes(self):
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir)
tokenizer = tokenizer_class.from_pretrained(tmp_dir)
self.assertTrue(tokenizer.decode([255]) == "")
# tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
def test_pretrained_model_lists(self):
pass
# tokenizer does not have vocabulary
def test_get_vocab(self):
pass
# inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
def test_pretokenized_inputs(self):
pass
# tests all ids in vocab => vocab doesn't exist so unnecessary to test
def test_conversion_reversible(self):
pass
| 14,880 | 44.929012 | 206 | py |
robust-transformers | robust-transformers-main/tests/vit/test_modeling_flax_vit.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = ViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, pixel_values
def create_and_check_model(self, config, pixel_values, labels):
model = FlaxViTModel(config=config)
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def setUp(self) -> None:
self.model_tester = FlaxViTModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
# We need to override this test because in ViT, the seq_len equals the number of patches + 1
# we compute that here
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
num_patches = (config.image_size // config.patch_size) ** 2
seq_length = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
    # We need to override this test because ViT's forward signature is different from that of text models.
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
    # We need to override this test because ViT expects pixel_values instead of input_ids
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
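                # JIT-compile the forward pass and check its outputs match the eager
                # (jit-disabled) run shape-for-shape.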
@jax.jit
def model_jitted(pixel_values, **kwargs):
return model(pixel_values=pixel_values, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
# We need to override this test because in ViT, the seq_len equals the number of patches + 1
# we compute that here
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
num_patches = (config.image_size // config.patch_size) ** 2
seq_length = num_patches + 1 # we add 1 for the [CLS] token
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(outputs)
| 9,884 | 40.016598 | 117 | py |
robust-transformers | robust-transformers-main/tests/vit/test_modeling_vit.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViT model. """
import inspect
import unittest
from transformers import ViTConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST, to_2tuple
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class ViTModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
encoder_stride=2,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.encoder_stride = encoder_stride
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
)
def create_and_check_model(self, config, pixel_values, labels):
model = ViTModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = ViTForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ViTModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# ViT does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# ViT has a different seq_length
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 14,533 | 38.928571 | 118 | py |
robust-transformers | robust-transformers-main/tests/vit/test_feature_extraction_vit.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class ViTFeatureExtractionTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=18,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_feat_extract_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None
def setUp(self):
self.feature_extract_tester = ViTFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_properties(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feature_extractor, "image_mean"))
self.assertTrue(hasattr(feature_extractor, "image_std"))
self.assertTrue(hasattr(feature_extractor, "do_normalize"))
self.assertTrue(hasattr(feature_extractor, "do_resize"))
self.assertTrue(hasattr(feature_extractor, "size"))
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_numpy(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_pytorch(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
| 6,658 | 33.682292 | 111 | py |
robust-transformers | robust-transformers-main/tests/vit/test_modeling_tf_vit.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow ViT model. """
import inspect
import os
import tempfile
import unittest
from transformers import ViTConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow, tooslow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
from transformers.models.vit.modeling_tf_vit import to_2tuple
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class TFViTModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = TFViTModel(config=config)
result = model(pixel_values, training=False)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
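        # e.g. with the tester defaults above (image_size=30, patch_size=2): (30 // 2) * (30 // 2) = 225 patches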
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
# Test with an image with different size than the one specified in config.
image_size = self.image_size // 2
pixel_values = pixel_values[:, :, :image_size, :image_size]
result = model(pixel_values, interpolate_pos_encoding=True, training=False)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(image_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
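        # e.g. with the default image_size=30 halved to 15 and patch_size=2: (15 // 2) * (15 // 2) = 49 patches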
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = TFViTForImageClassification(config)
result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
image_size = self.image_size // 2
pixel_values = pixel_values[:, :, :image_size, :image_size]
result = model(pixel_values, interpolate_pos_encoding=True, training=False)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFViTModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# ViT does not use inputs_embeds
pass
def test_graph_mode_with_inputs_embeds(self):
# ViT does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# overwrite from common since `encoder_seq_length` and `encoder_key_length` are calculated
# in a different way than in text models.
@tooslow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output_hidden_states = outputs["encoder_hidden_states"]
output_attentions = outputs["encoder_attentions"]
else:
output_hidden_states = outputs["hidden_states"]
output_attentions = outputs["attentions"]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(output_hidden_states), expected_num_layers)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]),
[seq_len, self.model_tester.hidden_size],
)
self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# ViT has a different seq_length
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="tf")
# forward pass
outputs = model(**inputs)
# verify the logits
expected_shape = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 16,592 | 41.43734 | 121 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_flax_logits_process.py | # coding=utf-8
# Copyright 2021 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation_flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
def _get_uniform_logits(self, batch_size: int, length: int):
scores = np.ones((batch_size, length)) / length
return scores
def test_temperature_dist_warper(self):
input_ids = None
length = 20
scores = self._get_uniform_logits(batch_size=2, length=length)
# tweak scores to not be uniform anymore
        scores[1, 5] = (1 / length) + 0.1  # peak in the second batch entry
        scores[1, 10] = (1 / length) - 0.4  # valley in the second batch entry
# compute softmax
probs = jax.nn.softmax(scores, axis=-1)
temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def test_top_k_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create ramp distribution
ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
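        # row 0 is simply [0..9], so its three largest logits sit at indices 7, 8, 9;
        # row 1 has its first half shifted above its second half, so its top-3 sit at indices 2, 3, 4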
top_k_warp = FlaxTopKLogitsWarper(3)
scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
# check special case
length = 5
top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
def test_top_p_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
top_p_warp = FlaxTopPLogitsWarper(0.7)
filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
# dist should be filtered to keep min num values so that sum is >= 0.7
# exp (-inf) => 0
EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
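        # row 0 keeps tokens 3 and 0 (0.5 + 0.3 = 0.8 >= 0.7); row 1 needs a third token since 0.3 + 0.3 = 0.6 < 0.7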
self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
# check edge cases with negative and extreme logits
ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
ramp_logits[1] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
def test_min_length_dist_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
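        # the processor masks the eos_token_id logit to -inf while cur_len < min_length, so generation cannot end early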
# check that min length is applied at length 5
input_ids = ids_tensor((batch_size, 20), vocab_size=20)
cur_len = 5
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
scores = self._get_uniform_logits(batch_size, vocab_size)
cur_len = 15
scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores_before_min_length).any())
def test_forced_bos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
bos_token_id = 0
logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
# check that all scores are -inf except the bos_token_id score
input_ids = ids_tensor((batch_size, 1), vocab_size=20)
cur_len = 1
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
cur_len = 3
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores).any())
def test_forced_eos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
max_length = 5
logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
# check that all scores are -inf except the eos_token_id when max_length is reached
input_ids = ids_tensor((batch_size, 4), vocab_size=20)
cur_len = 4
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
cur_len = 3
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores, cur_len=cur_len)
self.assertFalse(jnp.isinf(scores).any())
def test_processor_list(self):
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
top_k_warp = FlaxTopKLogitsWarper(3)
top_p_warp = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
cur_len = 10
# no processor list
scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
scores = top_k_warp(input_ids, scores, cur_len=cur_len)
scores = top_p_warp(input_ids, scores, cur_len=cur_len)
scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
# with processor list
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
)
scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
# scores should be equal
self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
def test_processor_list_jitted(self):
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 2
bos_token_id = 1
max_length = 15
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
input_ids_comp = input_ids.copy()
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_comp = scores.copy()
# instantiate all dist processors
temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
top_k_warp = FlaxTopKLogitsWarper(3)
top_p_warp = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
cur_len = 10
# no processor list
def run_no_processor_list(input_ids, scores, cur_len):
scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
scores = top_k_warp(input_ids, scores, cur_len=cur_len)
scores = top_p_warp(input_ids, scores, cur_len=cur_len)
scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
return scores
# with processor list
def run_processor_list(input_ids, scores, cur_len):
processor = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
)
scores = processor(input_ids, scores, cur_len=cur_len)
return scores
jitted_run_no_processor_list = jax.jit(run_no_processor_list)
jitted_run_processor_list = jax.jit(run_processor_list)
scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
# scores should be equal
self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 12,690 | 41.023179 | 122 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_stopping_criteria.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation_stopping_criteria import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
def _get_tensors(self, length):
batch_size = 3
vocab_size = 250
input_ids = ids_tensor((batch_size, length), vocab_size)
scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
return input_ids, scores
def test_list_criteria(self):
input_ids, scores = self._get_tensors(5)
criteria = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10),
MaxTimeCriteria(max_time=0.1),
]
)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(9)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(10)
self.assertTrue(criteria(input_ids, scores))
def test_max_length_criteria(self):
criteria = MaxLengthCriteria(max_length=10)
input_ids, scores = self._get_tensors(5)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(9)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(10)
self.assertTrue(criteria(input_ids, scores))
def test_max_new_tokens_criteria(self):
criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
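        # start_length=5 with max_new_tokens=5 stops generation once the total length reaches 10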
input_ids, scores = self._get_tensors(5)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(9)
self.assertFalse(criteria(input_ids, scores))
input_ids, scores = self._get_tensors(10)
self.assertTrue(criteria(input_ids, scores))
criteria_list = StoppingCriteriaList([criteria])
self.assertEqual(criteria_list.max_length, 10)
def test_max_time_criteria(self):
input_ids, scores = self._get_tensors(5)
criteria = MaxTimeCriteria(max_time=0.1)
self.assertFalse(criteria(input_ids, scores))
criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
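        # an initial_timestamp 0.2s in the past means the 0.1s budget is already exhausted, so the criterion fires immediately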
self.assertTrue(criteria(input_ids, scores))
def test_validate_stopping_criteria(self):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
with self.assertWarns(UserWarning):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
self.assertEqual(len(stopping_criteria), 1)
| 3,613 | 31.854545 | 98 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_utils.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
BartForConditionalGeneration,
BartTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
ImageGPTForCausalImageModeling,
Speech2TextForConditionalGeneration,
SpeechEncoderDecoderModel,
VisionEncoderDecoderModel,
top_k_top_p_filtering,
)
from transformers.generation_beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation_beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from transformers.generation_stopping_criteria import MaxLengthCriteria, StoppingCriteria, StoppingCriteriaList
from transformers.generation_utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
)
class GenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
input_name = "input_ids"
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
        # cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@staticmethod
def _get_logits_processor_and_kwargs(
input_length,
eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
max_length=None,
diversity_penalty=None,
):
process_kwargs = {
"min_length": input_length + 1,
"bad_words_ids": [[1, 0]],
"no_repeat_ngram_size": 2,
"repetition_penalty": 1.2,
}
logits_processor = LogitsProcessorList(
(
[
HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
]
if diversity_penalty is not None
else []
)
+ (
[
MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
]
if eos_token_id is not None
else []
)
+ (
[
ForcedBOSTokenLogitsProcessor(forced_bos_token_id),
]
if forced_bos_token_id is not None
else []
)
+ (
[ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)]
if forced_eos_token_id is not None
else []
)
+ [
NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id),
NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]),
RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]),
]
)
return process_kwargs, logits_processor
@staticmethod
def _get_warper_and_kwargs(num_beams):
warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7}
logits_warper = LogitsProcessorList(
[
TemperatureLogitsWarper(warp_kwargs["temperature"]),
TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
]
)
return warp_kwargs, logits_warper
@staticmethod
def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
"num_beam_groups": 2, # one beam per group
"diversity_penalty": 2.0,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=beam_kwargs["num_beam_groups"],
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": num_return_sequences * 4,
"num_return_sequences": num_return_sequences,
}
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=batch_size,
constraints=constraints,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
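        # decoding starts from scratch here: feed a single decoder_start_token_id per sequence instead of the encoder input ids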
input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _greedy_generate(
self,
model,
input_ids,
attention_mask,
max_length,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
eos_token_id=model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
kwargs = {}
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
num_beams=1,
max_length=max_length,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_process_kwargs,
)
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
with torch.no_grad():
output_greedy = model.greedy_search(
input_ids,
max_length=max_length,
attention_mask=attention_mask,
logits_processor=logits_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_greedy, output_generate
def _sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
logits_processor,
logits_warper,
logits_warper_kwargs,
process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
do_sample=True,
num_beams=1,
max_length=max_length,
num_return_sequences=num_return_sequences,
attention_mask=attention_mask,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_warper_kwargs,
**process_kwargs,
)
torch.manual_seed(0)
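        # re-seed so the manual sample() call below draws exactly the same tokens as generate() above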
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0)
input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor.append(InfNanRemoveLogitsProcessor())
with torch.no_grad():
output_sample = model.sample(
input_ids_clone,
attention_mask=attention_mask_clone,
max_length=max_length,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_sample, output_generate
def _beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_beam_search = model.beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_search
def _beam_sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
beam_scorer,
beam_kwargs,
logits_warper,
logits_warper_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=True,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_warper_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams * num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
else:
attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor = LogitsProcessorList()
logits_processor.append(InfNanRemoveLogitsProcessor())
torch.manual_seed(0)
with torch.no_grad():
output_beam_sample = model.beam_sample(
input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0),
beam_scorer,
max_length=max_length,
attention_mask=attention_mask,
logits_warper=logits_warper,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_sample
def _group_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.group_beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def _constrained_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
constrained_beam_scorer,
constraints,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
constraints=constraints,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=constrained_beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.constrained_beam_search(
input_ids_clone,
constrained_beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def test_greedy_generate(self):
# check `generate()` and `greedy_search()` are equal
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# test old generation output for backwards compatibility
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length
)
self.assertListEqual(output_greedy.tolist(), output_generate.tolist())
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config)
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config, use_cache=True)
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
# check `generate()` and `sample()` are equal
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=1,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
# check `generate()` and `sample()` yield equal results for `num_return_sequences`
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=3,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=2,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_sample, SampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())
for output in (output_sample, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=2)
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
# check `generate()` and `beam_search()` are equal
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
# check `generate()` and `beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
def test_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_beam, output_generate = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())
for output in (output_beam, output_generate):
self._check_outputs(
output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
)
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
model = model_class(config).to(torch_device).eval()
# check `generate()` and `beam_sample()` are equal
# use `num_return_sequences = 2` for `generate()`; the beam scorer below is instead built with a batch size scaled by it
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_generate, output_beam_sample = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())
def test_beam_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_beam_sample, output_generate = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_sample, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_without_input_ids(self):
config, _, _, max_length = self._get_input_ids_and_config()
# without a bos token id, generation cannot start from an empty (None) input
if config.bos_token_id is None:
return
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False,
max_length=max_length,
remove_invalid_values=True,
)
self.assertIsNotNone(output_ids_generate)
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
# check `generate()` and `group_beam_search()` are equal
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
# check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
def test_group_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
num_return_sequences = 1
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(
output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_group_beam_search, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
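# Constrained beam search: force a randomly drawn phrase via PhrasalConstraint, then check that
# `generate()` matches `constrained_beam_search()` and that the forced tokens appear in every output.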
def test_constrained_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# check `generate()` and `constrained_beam_search()` are equal
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating points
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
# check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences`
# Sample constraints
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
num_return_sequences = 2
max_length = 20
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
def test_constrained_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating points
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_generate_with_head_masking(self):
"""Test designed for encoder-decoder models to ensure the attention head masking is used."""
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device),
"decoder_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
"cross_attn_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
}
signature = inspect.signature(model.forward)
# We want to test only models where encoder/decoder head masking is implemented
if not set(head_masking.keys()) < set([*signature.parameters.keys()]):
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
input_ids,
attention_mask=attention_mask,
num_beams=1,
output_attentions=True,
return_dict_in_generate=True,
remove_invalid_values=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
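# Shared shape checks for the dict-style outputs returned by `generate(..., return_dict_in_generate=True)`:
# scores, (encoder/decoder) attentions and hidden states must be consistent with the generated length,
# the batch size and the number of returned sequences.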
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
if config.is_encoder_decoder:
# encoder
self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first iteration is identical to the no-cache case, so skip it here
attentions = output.attentions if not use_cache else output.attentions[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_attentions_for_generate(
num_sequences_in_output,
attentions=attentions,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
if config.is_encoder_decoder:
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first iteration is identical to the no-cache case, so skip it here
hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_hidden_states_for_generate(
num_sequences_in_output,
hidden_states,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _check_scores(self, batch_size, scores, length, config):
expected_shape = (batch_size, config.vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(attentions):
tgt_len = min_length + idx if not use_cache else 1
src_len = min_length + idx
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(hidden_states):
seq_len = min_length + idx if not use_cache else 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, seq_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
# check whether tensor_1 is a contiguous subsequence of tensor_2 or vice versa.
# convert both to CPU lists; the original device does not matter here.
if not isinstance(tensor_1, list):
tensor_1 = tensor_1.cpu().tolist()
if not isinstance(tensor_2, list):
tensor_2 = tensor_2.cpu().tolist()
in_order = len(tensor_1) <= len(tensor_2)
longer = tensor_2 if in_order else tensor_1
shorter = tensor_1 if in_order else tensor_2
flag = False
chunk_size = len(shorter)
for chunk_idx in range(len(longer) - chunk_size + 1):
subseq = longer[chunk_idx : chunk_idx + chunk_size]
if subseq == shorter:
flag = True
break
self.assertTrue(flag)
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
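# With `top_k=10`, `top_p=0.6` and `min_tokens_to_keep=4`, only the 4 highest logits per row
# (marked below) should survive the filtering; every other entry is set to -inf.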
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276,
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 4 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958,
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 4 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non filtered idx as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
7.3858,
9.6770,
], # expected non filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
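# End-to-end `generate()` integration tests, mostly on tiny random checkpoints; the @slow ones
# exercise full-size models.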
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids,
num_beams=4,
num_return_sequences=2,
num_beam_groups=4,
diversity_penalty=2.0,
remove_invalid_values=True,
)
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the first child for both. The couple announced the pregnancy in January. The name Silas is the middle name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
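# The backward-compat tests below check that passing `max_length` directly to greedy_search /
# sample / beam_search / group_beam_search still works but emits a UserWarning, since a
# StoppingCriteriaList is the newer way to bound generation length.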
def test_max_length_backward_compat_greedy(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_sample(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with torch.no_grad():
with self.assertWarns(UserWarning):
bart_model.sample(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 2
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
_ = bart_model.beam_search(
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs
)
def test_max_length_backward_compat_group_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs
)
def test_max_length_warning_if_different(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
# Greedy
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
stopping_criteria=stopping_criteria,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Sample
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.sample(
input_ids,
max_length=max_length,
stopping_criteria=stopping_criteria,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Beam
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
max_length=max_length,
beam_scorer=beam_scorer,
**model_kwargs,
)
# Grouped beam search
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids,
diverse_beam_scorer,
stopping_criteria=stopping_criteria,
num_beams=num_beams,
max_length=max_length,
**model_kwargs,
)
def test_beam_search_warning_if_max_length_is_passed(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
batch_size = 1
num_beams = 3
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
input_ids = input_ids.expand(num_beams, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
# pretend decoder_input_ids correspond to first encoder input id
decoder_input_ids = input_ids[:, :1]
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
with self.assertWarns(UserWarning):
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
max_length=10,
)
generated_ids = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer,
**model_kwargs,
)
beam_scorer_no_max_len = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
generated_ids_no_max_len = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer_no_max_len,
**model_kwargs,
)
# BeamSearchScorer max_length should not influence "real" max_length
self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist())
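# Supplying a MaxLengthCriteria via `stopping_criteria` overlaps with `generate()`'s own
# max-length handling and is expected to raise a ValueError, both with and without an explicit
# `max_length` argument.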
def test_custom_stopping_criteria_overload_error(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(MaxLengthCriteria(max_length=42))
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria)
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32)
def test_custom_stopping_criteria(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
class DummyCriteria(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= 20
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(DummyCriteria())
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape),
[1, 20],
)
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape),
[1, 18],
)
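# A user-supplied MinLengthLogitsProcessor clashes with the one `generate()` builds from
# `config.min_length`, so the call must raise until the config value is cleared.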
def test_custom_logits_processor(self):
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
logits_processor = LogitsProcessorList()
logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0))
with self.assertRaises(ValueError):
bart_model.generate(input_ids, logits_processor=logits_processor)
bart_model.config.min_length = None
bart_model.generate(input_ids, logits_processor=logits_processor)
def test_max_new_tokens_encoder_decoder(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 29])
max_new_tokens = 3
bart_model.config.max_length = 20
bart_model.config.eos_token_id = None
# Encoder decoder call
outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 1 BOS + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 4])
# Decoder only call
outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens)
# 29 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 32])
# Encoder decoder call > 20
outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS + 20 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_max_new_tokens_decoder_only(self):
article = """Justin Timberlake."""
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 9])
max_new_tokens = 3
gpt2_model.config.max_length = 20
# call < 20
outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 9 input_ids + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 12])
# call > 20
outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS token + 23 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
gpt2_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_encoder_decoder_generate_with_inputs_embeds(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
output_sequences = model.generate(inputs_embeds=inputs_embeds)
# make sure model generated correctly until `max_length`
self.assertEqual(output_sequences.shape, (1, 5))
def test_encoder_decoder_generate_attention_mask(self):
articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
# need extreme generation values here to force this test
# to fail when `attention_mask` is not correctly treated in generate
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5
).to(torch_device)
model.config.eos_token_id = None
input_ids = tokenizer(articles[0], return_tensors="pt").input_ids.to(torch_device)
input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device)
output_sequences_batched = model.generate(
input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True
)
output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True)
batched_out = output_sequences_batched.sequences_scores
out = output_sequences.sequences_scores
diff = (batched_out[:5].sum() - out.sum()).abs()
self.assertTrue(diff < 1e-4)
def test_decoder_generate_with_inputs_embeds(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=5).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
# cannot generate from `inputs_embeds` for decoder only
with self.assertRaises(ValueError):
model.generate(inputs_embeds=inputs_embeds)
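# The tests below check that the main model input (input_ids / input_values / input_features /
# pixel_values) can be passed either positionally or as a keyword argument with identical results.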
def test_generate_input_ids_as_kwarg(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 15))
def test_generate_non_nlp_input_ids_as_kwarg(self):
model = ImageGPTForCausalImageModeling.from_pretrained(
"hf-internal-testing/tiny-random-imagegpt", max_length=10
).to(torch_device)
input_ids = ids_tensor((3, 5), vocab_size=10)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 10))
def test_generate_input_ids_as_encoder_kwarg(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 5))
def test_generate_inputs_and_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids, input_ids=input_ids)
def test_generate_too_many_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids=input_ids, inputs_embeds=input_ids)
def test_generate_input_values_as_encoder_kwarg(self):
input_values = floats_tensor((2, 250))
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu()
output_sequences = model.generate(input_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_input_features_as_encoder_kwarg(self):
input_features = floats_tensor((3, 20, 24))
model = Speech2TextForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-speech_to_text")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_features=input_features, max_length=5).cpu()
output_sequences = model.generate(input_features, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 5))
def test_generate_pixel_values_as_encoder_kwarg(self):
pixel_values = floats_tensor((2, 3, 30, 30))
model = VisionEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-vision-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5).cpu()
output_sequences = model.generate(pixel_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_encoder_outputs_attention_mask(self):
input_values = floats_tensor((2, 250)).to(torch_device)
attention_mask = torch.ones_like(input_values)
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
encoder = model.get_encoder()
encoder_outputs = encoder(input_values)
output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs).cpu()
output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask)
output_sequences_with_mask = output_sequences_with_mask.cpu()
self.assertListEqual(output_sequences_no_mask.tolist(), output_sequences_with_mask.tolist())
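# The transition-score tests below verify that summing the per-step scores returned by
# `compute_transition_beam_scores` reproduces the `sequences_scores` of beam-based generation
# (length_penalty=0.0, so no length renormalization is involved).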
def test_transition_scores_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_encoder_decoder_with_eos(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_decoder_only(self):
articles = [
"Justin Timberlake",
"Michael Phelps",
]
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained(
"hf-internal-testing/tiny-random-gpt2",
max_length=10,
num_beams=4,
num_return_sequences=2,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_sample_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
do_sample=True,
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_group_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=2,
num_beam_groups=2,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
@slow
def test_beam_search_example_integration(self):
# exactly the example provided in the beam search docstring, which previously
# failed when copied directly from it. Refer to PR #15555
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# let's run beam search using 3 beams
num_beams = 3
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
# instantiate beam scorer
beam_scorer = BeamSearchScorer(
batch_size=1,
num_beams=num_beams,
device=model.device,
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alt bist du?"])
@slow
def test_constrained_beam_search(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids
constraints = [
PhrasalConstraint(force_tokens),
PhrasalConstraint(force_tokens_2),
]
starting_text = ["The soldiers were not prepared and"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
max_length=30,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers were not prepared and didn't know how big the big weapons would be, so they scared them off. They had no idea what to do",
],
)
@slow
def test_constrained_beam_search_mixed(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
flexible_phrases = tokenizer(
["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False
).input_ids
constraints = [
PhrasalConstraint(force_phrase),
DisjunctiveConstraint(flexible_phrases),
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
# max_length=20,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_mixed_mixin(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_word = "scared"
force_flexible = ["scream", "screams", "screaming", "screamed"]
force_words_ids = [
tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids,
tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids,
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_example_translation_mixin(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
force_words = ["sind"]
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
@slow
def test_constrained_beam_search_example_integration(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# lets run beam search using 5 beams
num_beams = 5
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
constraint_str = "sind"
constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token
constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
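# PhrasalConstraint requires the exact token sequence for "sind" to appear in every finished hypothesis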
# instantiate beam scorer
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.constrained_beam_search(
input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
def test_constrained_beam_search_mixin_type_checks(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids
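# return_tensors="pt" yields a tensor, but force_words_ids must be a nested list of python ints, so this raises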
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids]
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[-1]])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[[-1]]])
| 111,079 | 42.087665 | 293 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_flax_utils.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from jax import jit
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
def _get_input_ids_and_config(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
max_batch_size = 2
sequence_length = inputs["input_ids"].shape[-1] // 2
input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
attention_mask = jnp.ones_like(input_ids)
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
max_length = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def test_greedy_generate_pt_fx(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = False
config.max_length = max_length
config.decoder_start_token_id = 0
for model_class in self.all_generative_model_classes:
flax_model = model_class(config)
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
flax_generation_outputs = flax_model.generate(input_ids).sequences
pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
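# Flax pads generations up to max_length while PyTorch can stop early at EOS, so compare only the common prefix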
self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def test_greedy_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = False
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = True
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = False
config.max_length = max_length
config.num_beams = 2
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.do_sample = True
config.max_length = max_length
config.temperature = 0.8
config.top_k = 10
config.top_p = 0.3
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.max_length = max_length
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_logits_warper(self):
config, input_ids, _, max_length = self._get_input_ids_and_config()
config.max_length = max_length
config.num_beams = 2
config.min_length = 1
config.forced_bos_token_id = 8
config.forced_eos_token_id = 9
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_greedy_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
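# note: jax.ops.index_update was removed in newer JAX releases; there the equivalent is attention_mask.at[(0, 0)].set(0)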
config.do_sample = False
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_sample_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
config.do_sample = True
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
def test_beam_search_generate_attn_mask(self):
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# pad attention mask on the left
attention_mask = jax.ops.index_update(attention_mask, (0, 0), 0)
config.num_beams = 2
config.max_length = max_length
for model_class in self.all_generative_model_classes:
model = model_class(config)
generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
self.assertEqual(generation_outputs.shape[-1], max_length)
jit_generate = jit(model.generate)
jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
| 10,710 | 37.66787 | 106 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_logits_process.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from torch import nn
from transformers.generation_logits_process import (
EncoderNoRepeatNGramLogitsProcessor,
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
TypicalLogitsWarper,
)
@require_torch
class LogitsProcessorTest(unittest.TestCase):
def _get_uniform_logits(self, batch_size: int, length: int):
scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
return scores
def test_min_length_dist_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
# check that min length is applied at length 5
input_ids = ids_tensor((batch_size, 5), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_before_min_length = min_dist_processor(input_ids, scores)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
input_ids = ids_tensor((batch_size, 15), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_before_min_length = min_dist_processor(input_ids, scores)
self.assertFalse(torch.isinf(scores_before_min_length).any())
def test_temperature_dist_warper(self):
input_ids = None
length = 20
scores = self._get_uniform_logits(batch_size=2, length=length)
# tweak scores to not be uniform anymore
scores[1, 5] = (1 / length) + 0.1 # peak, 2nd batch
scores[1, 10] = (1 / length) - 0.4 # valley, 2nd batch
# compute softmax
probs = nn.functional.softmax(scores, dim=-1)
temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5)
temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3)
warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1)
warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1)
# uniform distribution stays uniform
self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def test_repetition_penalty_dist_process(self):
input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long)
vocab_size = 10
scores = self._get_uniform_logits(batch_size=2, length=vocab_size)
# give values special values
scores[0, 0] = -(1 / vocab_size)
scores[1, 5] = 4 / vocab_size
rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
scores = rep_penalty_proc(input_ids, scores.clone())
# check that values were correctly changed
self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2)
self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2)
self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2)
self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2)
def test_top_k_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create ramp distribution
ramp_logits = (
torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1)
)
ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
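# after the offset, tokens 2-4 carry the largest logits in the second row, so top-3 keeps them instead of tokens 7-9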
top_k_warp = TopKLogitsWarper(3)
scores = top_k_warp(input_ids, ramp_logits)
# check that correct tokens are filtered
self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
# check special cases
length = 5
logits = self._get_uniform_logits(batch_size=batch_size, length=length)
top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
scores = top_k_warp_safety_check(input_ids, logits)
# uniform dist is not changed
self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0])
ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1)
scores = top_k_warp_safety_check(input_ids, ramp_logits)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])
def test_top_p_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
dist = torch.log(
torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float)
)
top_p_warp = TopPLogitsWarper(0.7)
filtered_dist = torch.exp(top_p_warp(input_ids, dist))
# dist should be filtered to keep min num values so that sum is >= 0.7
# exp (-inf) => 0
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float
)
self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
# check edge cases with negative and extreme logits
ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
batch_size, 1
) - (vocab_size // 2)
# make ramp_logits more extreme
ramp_logits[1] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
filtered_dist = top_p_warp(input_ids, ramp_logits)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2])
def test_typical_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
dist = torch.log(
torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float)
)
typical_warp = TypicalLogitsWarper(0.5)
filtered_dist = torch.exp(typical_warp(input_ids, dist))
# dist should be filtered to keep min num values so that the typical mass is >= 0.5
# exp (-inf) => 0
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float
)
self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
# check special cases
length = 5
logits = self._get_uniform_logits(batch_size=batch_size, length=length)
typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3)
scores = typical_warp_safety_check(input_ids, logits)
# uniform dist is not changed
self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0])
# check edge cases with negative and extreme logits
ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(
batch_size, 1
) - (vocab_size // 2)
# make ramp_logits more extreme
ramp_logits[1] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0)
filtered_dist = typical_warp(input_ids, ramp_logits)
# first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])
def test_no_repeat_ngram_dist_processor(self):
vocab_size = 3
batch_size = 2
input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
scores = self._get_uniform_logits(batch_size, vocab_size)
no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2)
no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3)
filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())
# 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch
self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]])
# 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch
self.assertListEqual(
torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]]
)
def test_encoder_no_repeat_ngram_dist_processor(self):
vocab_size = 3
num_beams = 2
batch_size = 1
encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long)
input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long)
scores = self._get_uniform_logits(batch_size * num_beams, vocab_size)
no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids)
no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids)
filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())
# 2-gram would forbid 1st and 2nd token at 1st beam and 1st token (0) at 2nd beam
self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]])
# 3-gram would forbid 1st token at 1st beam and no token at 2nd beam
self.assertListEqual(
torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]]
)
# Batched input
vocab_size = 3
num_beams = 2
batch_size = 2
encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long)
input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long)
scores = self._get_uniform_logits(batch_size * num_beams, vocab_size)
no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids)
no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids)
filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone())
filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone())
# 2gram
# Batch 1
# - Beam 1: tokens (1, 2) forbidden
# - Beam 2: tokens (1) forbidden
# Batch 2
# - Beam 1: tokens (0, 2) forbidden
# - Beam 2: tokens (1) forbidden
self.assertListEqual(
torch.isinf(filtered_scores_2_gram).tolist(),
[[False, True, True], [False, True, False], [True, False, True], [False, True, False]],
)
# Batch 1
# - Beam 1: tokens (1) forbidden
# - Beam 2: tokens () forbidden
# Batch 2
# - Beam 1: tokens (2) forbidden
# - Beam 2: tokens () forbidden
self.assertListEqual(
torch.isinf(filtered_scores_3_gram).tolist(),
[[False, True, False], [False, False, False], [False, False, True], [False, False, False]],
)
def test_no_bad_words_dist_processor(self):
vocab_size = 5
batch_size = 2
eos_token_id = 4
input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]]
scores = self._get_uniform_logits(batch_size, vocab_size)
no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id)
filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone())
# batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden
# batch 2: 1st, 2nd, and 3rd (0, 1, 2) token are forbidden
# Note that 5th element cannot be forbidden as it is EOS token
self.assertListEqual(
torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]]
)
# check edge case
no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id)
filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone())
self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3))
def test_processor_list(self):
batch_size = 4
sequence_length = 10
vocab_size = 15
eos_token_id = 0
# dummy input_ids and scores
input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
input_ids_comp = input_ids.clone()
scores = self._get_uniform_logits(batch_size, vocab_size)
scores_comp = scores.clone()
# instantiate all dist processors
min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
temp_dist_warp = TemperatureLogitsWarper(temperature=0.5)
rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
top_k_warp = TopKLogitsWarper(3)
top_p_warp = TopPLogitsWarper(0.8)
no_repeat_proc = NoRepeatNGramLogitsProcessor(2)
no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id)
# no processor list
scores = min_dist_proc(input_ids, scores)
scores = temp_dist_warp(input_ids, scores)
scores = rep_penalty_proc(input_ids, scores)
scores = top_k_warp(input_ids, scores)
scores = top_p_warp(input_ids, scores)
scores = no_repeat_proc(input_ids, scores)
scores = no_bad_words_dist_proc(input_ids, scores)
# with processor list
processor = LogitsProcessorList(
[
min_dist_proc,
temp_dist_warp,
rep_penalty_proc,
top_k_warp,
top_p_warp,
no_repeat_proc,
no_bad_words_dist_proc,
]
)
scores_comp = processor(input_ids, scores_comp)
# scores should be equal
self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
def test_prefix_constrained_logits_processor(self):
vocab_size = 5
batch_size = 2
input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
scores = self._get_uniform_logits(batch_size, vocab_size)
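# allow only tokens {0, 1} for the first batch item and {2, 3} for the second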
def prefix_allowed_tokens_fn(batch_id, inputs_ids):
return [[0, 1], [2, 3]][batch_id]
prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1)
filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone())
# batch 1: 1st, 2nd (0, 1) token are allowed
# batch 2: 3rd, 4th (2, 3) token are allowed
self.assertListEqual(
torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]]
)
def test_hamming_diversity(self):
vocab_size = 4
num_beams = 2
num_beam_groups = 2
scores = self._get_uniform_logits(num_beams, vocab_size)
# batch_idx = 0 -> index batch_idx * num_beams -> idx = 0 * 2 = 0 -> penalises token 0 (current_tokens[0])
# batch_idx = 1 -> index batch_idx * num_beams -> idx = 1 * 2 = 2 -> penalises token 1 (current_tokens[2])
current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long)
diversity_logits_processor = HammingDiversityLogitsProcessor(
diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups
)
processed_scores = diversity_logits_processor(None, scores, current_tokens, 1)
self.assertTrue(
torch.allclose(
processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3
)
)
self.assertTrue(
torch.allclose(
processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3
)
)
def test_forced_bos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
bos_token_id = 0
logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
# check that all scores are -inf except the bos_token_id score
input_ids = ids_tensor((batch_size, 1), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores)
self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
input_ids = ids_tensor((batch_size, 4), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores)
self.assertFalse(torch.isinf(scores).any())
def test_forced_eos_token_logits_processor(self):
vocab_size = 20
batch_size = 4
eos_token_id = 0
max_length = 5
logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
# check that all scores are -inf except the eos_token_id when max_length is reached
input_ids = ids_tensor((batch_size, 4), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores)
self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
input_ids = ids_tensor((batch_size, 3), vocab_size=20)
scores = self._get_uniform_logits(batch_size, vocab_size)
scores = logits_processor(input_ids, scores)
self.assertFalse(torch.isinf(scores).any())
def test_remove_nan_inf_logits_processor(self):
scores = torch.tensor(
[[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device
)
input_ids = ids_tensor((2, 4), vocab_size=20)
logits_processor = InfNanRemoveLogitsProcessor()
scores = logits_processor(input_ids, scores)
self.assertTrue(
torch.allclose(
scores,
torch.tensor(
[[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, float("-inf")]],
device=torch_device,
),
atol=1e-6,
)
)
| 21,525 | 41.457594 | 122 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_beam_search.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers.generation_beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation_beam_search import BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer
class BeamSearchTester:
def __init__(
self,
parent,
batch_size=3,
sequence_length=10,
vocab_size=99,
pad_token_id=0,
max_length=20,
num_beams=4,
length_penalty=2.0,
do_early_stopping=True,
num_beam_hyps_to_keep=2,
):
self.parent = parent
self.batch_size = batch_size
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.pad_token_id = pad_token_id
self.max_length = max_length
self.num_beams = num_beams
self.length_penalty = length_penalty
self.do_early_stopping = do_early_stopping
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
# cannot be randomly generated
self.eos_token_id = vocab_size + 1
def prepare_beam_scorer(self, **kwargs):
return BeamSearchScorer(
batch_size=kwargs.get("batch_size", self.batch_size),
num_beams=kwargs.get("num_beams", self.num_beams),
device=torch_device,
length_penalty=kwargs.get("length_penalty", self.length_penalty),
do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
)
def prepare_inputs(self):
input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size)
next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device)
next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device)
next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True)
return (input_ids, next_tokens, next_indices, next_scores)
def check_beam_hypotheses(self, input_ids, *args):
# check that correct number of beam hypotheses is set in beam scorer
beam_scorer = self.prepare_beam_scorer(do_early_stopping=True)
beam_hyp = beam_scorer._beam_hyps[0]
self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size)
# check correct type
self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses))
# check that num_beams is correctly set
self.parent.assertEqual(beam_hyp.num_beams, self.num_beams)
# check for early stopping deactivated
for beam_idx in range(self.num_beams):
beam_hyp.add(input_ids[beam_idx], -10.0)
# if early stopping True -> score does not matter
self.parent.assertTrue(beam_hyp.is_done(-10.0, 5))
# re-init
beam_scorer = self.prepare_beam_scorer(do_early_stopping=False)
beam_hyp = beam_scorer._beam_hyps[0]
# add `num_beams + 1` beams to change `worst_score`
for beam_idx in range(self.num_beams + 1):
beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))
# -10.0 is removed => -9.0 is worst score (hypothesis scores are length-normalised by len**length_penalty)
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))
# -5.0 is better than worst score => should not be finished
self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))
# -20.0 is worse than worst score => should be finished
self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length))
def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores):
# check too many eos tokens
beam_scorer = self.prepare_beam_scorer()
tokens = next_tokens.clone()
tokens[0, :] = self.eos_token_id
with self.parent.assertRaises(ValueError):
beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)
# check all batches are done
beam_scorer = self.prepare_beam_scorer()
tokens = next_tokens.clone()
tokens[:, : self.num_beams] = self.eos_token_id
beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)
# beam scorer should be done
self.parent.assertTrue(beam_scorer.is_done)
# check a regular update where the beam in column 1 of each batch emits the eos token
beam_scorer = self.prepare_beam_scorer()
tokens = next_tokens.clone()
tokens[:, 1] = self.eos_token_id
beam_outputs = beam_scorer.process(
input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id
)
output_scores = beam_outputs["next_beam_scores"]
output_tokens = beam_outputs["next_beam_tokens"]
output_indices = beam_outputs["next_beam_indices"]
def cut_expected_tensor(tensor):
return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()
# check all outputs
# cut out id of eos token and take best `num_beams` outputs
expected_output_tokens = cut_expected_tensor(tokens)
expected_output_scores = cut_expected_tensor(next_scores)
# add num_beams * batch_idx
expected_output_indices = (
cut_expected_tensor(next_indices)
+ (torch.arange(self.num_beams * self.batch_size, device=torch_device) // self.num_beams) * self.num_beams
)
self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))
# make sure ids of eos token are correctly saved in beam_hyps of beam scorer
for batch_idx in range(self.batch_size):
correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
self.parent.assertListEqual(
input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][-1].tolist()
)
def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores):
# max_length should be only one more than current input_ids to check that eos is correctly appended
max_length = self.sequence_length + 1
beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False)
# update beams and append to input_ids
tokens = next_tokens.clone()
# first batch, first output has to finish with eos token id since scores are correctly sorted
tokens[0, 0] = self.eos_token_id
# make sure corresponding score is as good as possible to surely be picked first
next_scores[0, 0] = 0.0
beam_outputs = beam_scorer.process(
input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id
)
output_scores = beam_outputs["next_beam_scores"]
output_tokens = beam_outputs["next_beam_tokens"]
output_indices = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)
# finalize
sequence_output = beam_scorer.finalize(
input_ids,
output_scores,
output_tokens,
output_indices,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
max_length=max_length,
)
sequences = sequence_output["sequences"]
sequence_scores = sequence_output["sequence_scores"]
# since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])
# check sequence_scores
self.parent.assertFalse((sequence_scores > 0).any().item())
# first batch has to finish with eos_token
self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id)
# other batches cannot finish with eos token
self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id)
self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id)
# now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned
beam_scorer.num_beam_hyps_to_keep = self.num_beams
sequence_output = beam_scorer.finalize(
input_ids,
output_scores,
output_tokens,
output_indices,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
max_length=max_length,
)
sequences = sequence_output["sequences"]
sequence_scores = sequence_output["sequence_scores"]
self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length])
self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size])
class ConstrainedBeamSearchTester:
def __init__(
self,
parent,
constraints=None,
batch_size=3,
sequence_length=10,
vocab_size=99,
pad_token_id=0,
max_length=20,
num_beams=4,
length_penalty=2.0,
do_early_stopping=True,
num_beam_hyps_to_keep=2,
):
self.parent = parent
self.batch_size = batch_size
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.pad_token_id = pad_token_id
self.max_length = max_length
self.num_beams = num_beams
self.length_penalty = length_penalty
self.do_early_stopping = do_early_stopping
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
if constraints is None:
force_tokens = torch.randint(10, 50, (1, 2))[0].tolist()
disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist()
constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)]
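# default to one phrasal and one disjunctive constraint built from random token ids in [10, 50)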
self.constraints = constraints
# cannot be randomly generated
self.eos_token_id = vocab_size + 1
def prepare_constrained_beam_scorer(self, **kwargs):
return ConstrainedBeamSearchScorer(
constraints=kwargs.get("constraints", self.constraints),
batch_size=kwargs.get("batch_size", self.batch_size),
num_beams=kwargs.get("num_beams", self.num_beams),
device=torch_device,
length_penalty=kwargs.get("length_penalty", self.length_penalty),
do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
)
def prepare_inputs(self):
input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size)
next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device)
next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device)
next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True)
scores_for_all_vocab, _ = (
-floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device)
).sort(descending=True)
return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab)
def check_beam_hypotheses(self, input_ids, *args):
# check that correct number of beam hypotheses is set in beam scorer
constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True)
beam_hyp = constrained_beam_scorer._beam_hyps[0]
self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size)
# check correct type
self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses))
# check that num_beams is correctly set
self.parent.assertEqual(beam_hyp.num_beams, self.num_beams)
# check for early stopping deactivated
for beam_idx in range(self.num_beams):
beam_hyp.add(input_ids[beam_idx], -10.0)
# if early stopping True -> score does not matter
self.parent.assertTrue(beam_hyp.is_done(-10.0, 5))
# re-init
constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False)
beam_hyp = constrained_beam_scorer._beam_hyps[0]
# add `num_beams + 1` beams to change `worst_score`
for beam_idx in range(self.num_beams + 1):
beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))
# -10.0 is removed => -9.0 is worst score (hypothesis scores are length-normalised by len**length_penalty)
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))
# -5.0 is better than worst score => should not be finished
self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))
# -20.0 is worse than worst score => should be finished
self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length))
def check_constrained_beam_scorer_update(
self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab
):
# check too many eos tokens
constrained_beam_scorer = self.prepare_constrained_beam_scorer()
stacked_token_ids = []
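# build a prefix that satisfies every constraint (taking the first alternative of disjunctive ones) and write it into input_ids below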
for constraint in self.constraints:
token_ids = constraint.token_ids
token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids
stacked_token_ids = stacked_token_ids + token_ids
fulfilling_sequence = torch.LongTensor(stacked_token_ids)
fulfill_len = fulfilling_sequence.size(0)
input_ids[:, :fulfill_len] = fulfilling_sequence
tokens = next_tokens.clone()
tokens[0, :] = self.eos_token_id
with self.parent.assertRaises(ValueError):
constrained_beam_scorer.process(
input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
)
# check all batches are done
constrained_beam_scorer = self.prepare_constrained_beam_scorer()
tokens = next_tokens.clone()
tokens[:, : self.num_beams] = self.eos_token_id
constrained_beam_scorer.process(
input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
)
# beam scorer should be done
self.parent.assertTrue(constrained_beam_scorer.is_done)
# check a regular update where the beam in column 1 of each batch emits the eos token
constrained_beam_scorer = self.prepare_constrained_beam_scorer()
tokens = next_tokens.clone()
tokens[:, 1] = self.eos_token_id
beam_outputs = constrained_beam_scorer.process(
input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
)
output_scores = beam_outputs["next_beam_scores"]
output_tokens = beam_outputs["next_beam_tokens"]
output_indices = beam_outputs["next_beam_indices"]
def cut_expected_tensor(tensor):
return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()
# check all outputs
# cut out id of eos token and take best `num_beams` outputs
expected_output_tokens = cut_expected_tensor(tokens)
expected_output_scores = cut_expected_tensor(next_scores)
# add num_beams * batch_idx
expected_output_indices = (
cut_expected_tensor(next_indices)
+ (torch.arange(self.num_beams * self.batch_size, device=torch_device) // self.num_beams) * self.num_beams
)
self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))
# make sure ids of eos token are correctly saved in beam_hyps of beam scorer
for batch_idx in range(self.batch_size):
correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
self.parent.assertListEqual(
input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][-1].tolist()
)
def check_constrained_beam_scorer_finalize(
self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab
):
# max_length should be only one more than current input_ids to check that eos is correctly appended
max_length = self.sequence_length + 1
# for testing finalize, we do want to have fulfilled constraints
stacked_token_ids = []
for constraint in self.constraints:
token_ids = constraint.token_ids
token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids
stacked_token_ids = stacked_token_ids + token_ids
fulfilling_sequence = torch.LongTensor(stacked_token_ids)
fulfill_len = fulfilling_sequence.size(0)
input_ids[:, :fulfill_len] = fulfilling_sequence
constrained_beam_scorer = self.prepare_constrained_beam_scorer(
num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False
)
constraints = constrained_beam_scorer.constraints
# update beams and append to input_ids
tokens = next_tokens.clone()
# first batch, first output has to finish with eos token id since scores are correctly sorted
tokens[0, 0] = self.eos_token_id
# make sure corresponding score is as good as possible to surely be picked first
next_scores[0, 0] = 0.0
beam_outputs = constrained_beam_scorer.process(
input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
)
output_scores = beam_outputs["next_beam_scores"]
output_tokens = beam_outputs["next_beam_tokens"]
output_indices = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)
# finalize
sequence_output = constrained_beam_scorer.finalize(
input_ids,
output_scores,
output_tokens,
output_indices,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
max_length=max_length,
)
sequences = sequence_output["sequences"]
sequence_scores = sequence_output["sequence_scores"]
# since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])
# check sequence_scores
self.parent.assertFalse((sequence_scores > 0).any().item())
# first batch has to finish with eos_token
self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id)
# other batches cannot finish with eos token
self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id)
self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id)
# test that the constraint is indeed fulfilled
for (output, constraint) in [(s, c) for s in sequences for c in constraints]:
forced_token_ids = constraint.token_ids
if isinstance(forced_token_ids[0], list):
# disjunctive case
flag = False
for token_ids in forced_token_ids:
if self._check_sequence_inside_sequence(output, token_ids):
flag = True
break
self.parent.assertEqual(flag, True)
else:
self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True)
# now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned
# constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams
constrained_beam_scorer = self.prepare_constrained_beam_scorer(
num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False
)
sequence_output = constrained_beam_scorer.finalize(
input_ids,
output_scores,
output_tokens,
output_indices,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
max_length=max_length,
)
sequences = sequence_output["sequences"]
sequence_scores = sequence_output["sequence_scores"]
self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length])
self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size])
def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
# check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1.
# set to same device. we don't care what device.
if not isinstance(tensor_1, list):
tensor_1 = tensor_1.cpu().tolist()
if not isinstance(tensor_2, list):
tensor_2 = tensor_2.cpu().tolist()
in_order = len(tensor_1) <= len(tensor_2)
longer = tensor_2 if in_order else tensor_1
shorter = tensor_1 if in_order else tensor_2
flag = False
chunk_size = len(shorter)
for chunk_idx in range(len(longer) - chunk_size + 1):
subseq = longer[chunk_idx : chunk_idx + chunk_size]
if subseq == shorter:
flag = True
break
return flag
@require_torch
class BeamSearchTest(unittest.TestCase):
def setUp(self):
self.beam_search_tester = BeamSearchTester(self)
def test_beam_hypotheses(self):
inputs = self.beam_search_tester.prepare_inputs()
self.beam_search_tester.check_beam_hypotheses(*inputs)
def test_beam_scorer_update(self):
inputs = self.beam_search_tester.prepare_inputs()
self.beam_search_tester.check_beam_scorer_update(*inputs)
def test_beam_scorer_finalize(self):
inputs = self.beam_search_tester.prepare_inputs()
self.beam_search_tester.check_beam_scores_finalize(*inputs)
@require_torch
class ConstrainedBeamSearchTest(unittest.TestCase):
def setUp(self):
self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self)
def test_constrained_beam_hypotheses(self):
inputs = self.constrained_beam_search_tester.prepare_inputs()
self.constrained_beam_search_tester.check_beam_hypotheses(*inputs)
def test_constrained_beam_scorer_update(self):
inputs = self.constrained_beam_search_tester.prepare_inputs()
self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs)
def test_constrained_beam_scorer_finalize(self):
inputs = self.constrained_beam_search_tester.prepare_inputs()
self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
| 24,325 | 42.439286 | 119 | py |
robust-transformers | robust-transformers-main/tests/generation/test_generation_beam_constraints.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation_beam_constraints import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
def test_input_types(self):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
cset = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(cset)
self.assertTrue(isinstance(dc.token_ids, list))
with self.assertRaises(ValueError):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(ValueError):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def test_check_illegal_input(self):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError):
DisjunctiveConstraint(cset) # fails here
def test_example_progression(self):
cset = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(cset)
stepped, completed, reset = dc.update(1)
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
stepped, completed, reset = dc.update(2)
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
stepped, completed, reset = dc.update(3)
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def test_example_progression_unequal_three_mid_and_reset(self):
cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(cset)
stepped, completed, reset = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
stepped, completed, reset = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
stepped, completed, reset = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
stepped, completed, reset = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
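# after the reset the constraint starts from scratch and can be completed via the shorter [1, 2, 5] branch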
stepped, completed, reset = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
stepped, completed, reset = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
stepped, completed, reset = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 4,445 | 37.327586 | 118 | py |
robust-transformers | robust-transformers-main/tests/vilt/test_feature_extraction_vilt.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViltFeatureExtractor
class ViltFeatureExtractionTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=30,
size_divisor=2,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.size_divisor = size_divisor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_feat_extract_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to ViltFeatureExtractor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
scale = self.size / min(w, h)
if h < w:
newh, neww = self.size, scale * w
else:
newh, neww = scale * h, self.size
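            # The longer side is capped at size * (1333 / 800); if it exceeds the cap, both sides are rescaled.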
max_size = int((1333 / 800) * self.size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
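            # Round both dimensions down to the nearest multiple of size_divisor.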
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class ViltFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = ViltFeatureExtractor if is_vision_available() else None
def setUp(self):
self.feature_extract_tester = ViltFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_properties(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feature_extractor, "image_mean"))
self.assertTrue(hasattr(feature_extractor, "image_std"))
self.assertTrue(hasattr(feature_extractor, "do_normalize"))
self.assertTrue(hasattr(feature_extractor, "do_resize"))
self.assertTrue(hasattr(feature_extractor, "size"))
self.assertTrue(hasattr(feature_extractor, "size_divisor"))
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape,
(1, self.feature_extract_tester.num_channels, expected_height, expected_width),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
expected_height,
expected_width,
),
)
def test_call_numpy(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape,
(1, self.feature_extract_tester.num_channels, expected_height, expected_width),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
expected_height,
expected_width,
),
)
def test_call_pytorch(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape,
(1, self.feature_extract_tester.num_channels, expected_height, expected_width),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
expected_height,
expected_width,
),
)
def test_equivalence_pad_and_create_pixel_mask(self):
# Initialize feature_extractors
feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict)
feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad_and_create_pixel_mask" and calling the feature extractor return the same tensors
encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt")
encoded_images = feature_extractor_2(image_inputs, return_tensors="pt")
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
)
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4)
)
| 9,796 | 37.876984 | 119 | py |
robust-transformers | robust-transformers-main/tests/vilt/test_modeling_vilt.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViLT model. """
import unittest
from datasets import load_dataset
from transformers import ViltConfig, is_torch_available, is_vision_available
from transformers.file_utils import cached_property
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltModel,
)
from transformers.models.vilt.modeling_vilt import VILT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViltProcessor
class ViltModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
scope=None,
modality_type_vocab_size=2,
add_multiple_images=False,
num_images=-1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.modality_type_vocab_size = modality_type_vocab_size
self.add_multiple_images = add_multiple_images
self.num_images = num_images
# we set the expected sequence length (which is used in several tests)
# this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token
self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
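        # Models that take two images per example (e.g. the NLVR2 head) need an extra num_images dimension.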
if self.add_multiple_images:
pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
else:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels)
def get_config(self):
return ViltConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
num_labels=self.num_labels,
modality_type_vocab_size=self.modality_type_vocab_size,
num_images=self.num_images,
)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
):
model = ViltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, pixel_values=pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
def prepare_pixel_values(self):
return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
@require_torch
class ViltModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ViltModel,
ViltForQuestionAnswering,
ViltForImageAndTextRetrieval,
ViltForMaskedLM,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
# ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        # if model_class.__name__ == "ViltForNaturalLanguageVisualReasoning":
# inputs_dict["pixel_values"] = floats_tensor([self.model_tester.batch_size, self.model_tester.num_images, self.model_tester.num_channels, self.model_tester.image_size, self.model_tester.image_size])
if return_labels:
if model_class.__name__ == "ViltForQuestionAnswering":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device
)
elif model_class.__name__ == "ViltForMaskedLM":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
elif model_class.__name__ == "ViltForImagesAndTextClassification":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ViltModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ == "ViltForImagesAndTextClassification":
config.modality_type_vocab_size = 3
# ViltForImageAndTextRetrieval doesn't support training for now
if model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
# ViltForImageAndTextRetrieval doesn't support training for now
if (
model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_save_load(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_determinism(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "expected_seq_len", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertEqual(len(self_attentions), self.model_tester.num_images)
self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertEqual(len(hidden_states), self.model_tester.num_images)
self.assertEqual(len(hidden_states[0]), expected_num_layers)
else:
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.expected_seq_len
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(hidden_states[0][0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
else:
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
print("Model class:", model_class)
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
hidden_states[0].retain_grad()
attentions[0].retain_grad()
else:
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertIsNotNone(hidden_states[0].grad)
self.assertIsNotNone(attentions[0].grad)
else:
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
@slow
def test_model_from_pretrained(self):
for model_name in VILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViltModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase):
all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()
def setUp(self):
self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
@unittest.skip("We only test the model that takes in multiple images")
def test_model(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViltModelIntegrationTest(unittest.TestCase):
@cached_property
def default_processor(self):
return ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") if is_vision_available() else None
@slow
def test_inference_masked_lm(self):
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "a bunch of [MASK] laying on a [MASK]."
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size([1, 11, 30522])
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4))
# verify masked token prediction equals "cats"
predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
assert processor.decode([predicted_id]) == "cats"
@slow
def test_inference_visual_question_answering(self):
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "How many cats are there?"
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 3129))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# compute loss
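        # The VQA head is trained with soft targets: each annotated answer id gets a score in [0, 1]
        # rather than a one-hot label.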
vqa_labels = [[2, 3, 155, 800]]
vqa_scores = [[1.0, 0.3, 0.3, 0.3]]
labels = torch.zeros(1, model.config.num_labels).to(torch_device)
for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(labels_example, scores_example):
labels[i, l] = s
# forward pass
outputs = model(**inputs, labels=labels)
# verify we have a positive loss
self.assertTrue(outputs.loss > 0)
@slow
def test_inference_natural_language_visual_reasoning(self):
model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2").to(
torch_device
)
processor = self.default_processor
dataset = load_dataset("hf-internal-testing/fixtures_nlvr2", split="test")
image1 = Image.open(dataset[0]["file"]).convert("RGB")
image2 = Image.open(dataset[1]["file"]).convert("RGB")
text = "The left image contains twice the number of dogs as the right image, and at least two dogs in total are standing."
encoding_1 = processor(image1, text, return_tensors="pt")
encoding_2 = processor(image2, text, return_tensors="pt")
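        # NLVR2 pairs two images with one sentence, so stack them into pixel_values of shape
        # (batch_size, num_images, num_channels, height, width).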
pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1)
# forward pass
outputs = model(
input_ids=encoding_1.input_ids.to(torch_device),
pixel_values=pixel_values.to(torch_device),
)
# verify the logits
expected_shape = torch.Size([1, 2])
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-2.4013, 2.9342]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4))
| 24,361 | 39.069079 | 211 | py |
robust-transformers | robust-transformers-main/tests/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import XLMRobertaXLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
)
from transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl import (
XLMRobertaXLEmbeddings,
create_position_ids_from_input_ids,
)
class XLMRobertaXLModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return XLMRobertaXLConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = XLMRobertaXLModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = XLMRobertaXLForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = XLMRobertaXLForCausalLM(config=config).to(torch_device).eval()
# make sure that ids don't start with pad token
mask = input_ids.ne(config.pad_token_id).long()
input_ids = input_ids * mask
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
# make sure that ids don't start with pad token
mask = next_tokens.ne(config.pad_token_id).long()
next_tokens = next_tokens * mask
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
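        # Run the full sequence without cache and only the new tokens with the cached past_key_values;
        # the outputs for the new positions must match for the caching path to be correct.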
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = XLMRobertaXLForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = XLMRobertaXLForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class XLMRobertaXLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLModel,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (XLMRobertaXLForCausalLM,) if is_torch_available() else ()
def setUp(self):
self.model_tester = XLMRobertaXLModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_create_position_ids_respects_padding_index(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = XLMRobertaXLEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
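        # Non-padding tokens are numbered padding_idx + 1, padding_idx + 2, ... while the padding token
        # keeps position padding_idx itself.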
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = XLMRobertaXLEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@require_torch
class XLMRobertaModelXLIntegrationTest(unittest.TestCase):
@slow
def test_xlm_roberta_xl(self):
model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl").to(torch_device)
input_ids = torch.tensor(
[[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device
)
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 2560)) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[0.0110, 0.0605, 0.0354, 0.0689, 0.0066, 0.0691, 0.0302, 0.0412, 0.0860, 0.0036, 0.0405, 0.0170]],
device=torch_device,
)
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@unittest.skip(reason="Model is too large to be tested on the CI")
def test_xlm_roberta_xxl(self):
model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xxl").to(torch_device)
input_ids = torch.tensor(
[[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]], device=torch_device
)
# The dog is cute and lives in the garden house
expected_output_shape = torch.Size((1, 12, 4096)) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = torch.tensor(
[[0.0046, 0.0146, 0.0227, 0.0126, 0.0219, 0.0175, -0.0101, 0.0006, 0.0124, 0.0209, -0.0063, 0.0096]],
device=torch_device,
)
output = model(input_ids)["last_hidden_state"].detach()
self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 20,809 | 39.803922 | 117 | py |
robust-transformers | robust-transformers-main/tests/luke/test_modeling_luke.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LUKE model. """
import unittest
from transformers import LukeConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeModel,
LukeTokenizer,
)
from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST
class LukeModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
entity_length=3,
mention_length=5,
use_attention_mask=True,
use_token_type_ids=True,
use_entity_ids=True,
use_entity_attention_mask=True,
use_entity_token_type_ids=True,
use_entity_position_ids=True,
use_labels=True,
vocab_size=99,
entity_vocab_size=10,
entity_emb_size=6,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_entity_classification_labels=9,
num_entity_pair_classification_labels=6,
num_entity_span_classification_labels=4,
use_entity_aware_attention=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.entity_length = entity_length
self.mention_length = mention_length
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_entity_ids = use_entity_ids
self.use_entity_attention_mask = use_entity_attention_mask
self.use_entity_token_type_ids = use_entity_token_type_ids
self.use_entity_position_ids = use_entity_position_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.entity_vocab_size = entity_vocab_size
self.entity_emb_size = entity_emb_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_entity_classification_labels = num_entity_classification_labels
self.num_entity_pair_classification_labels = num_entity_pair_classification_labels
self.num_entity_span_classification_labels = num_entity_span_classification_labels
self.scope = scope
self.use_entity_aware_attention = use_entity_aware_attention
self.encoder_seq_length = seq_length
self.key_length = seq_length
self.num_hidden_states_types = 2 # hidden_states and entity_hidden_states
def prepare_config_and_inputs(self):
# prepare words
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
# prepare entities
entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
entity_attention_mask = None
if self.use_entity_attention_mask:
entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length])
entity_token_type_ids = None
if self.use_token_type_ids:
entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size)
entity_position_ids = None
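        # Each entity is described by the word positions of its mention, one row of length mention_length.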
if self.use_entity_position_ids:
entity_position_ids = ids_tensor(
[self.batch_size, self.entity_length, self.mention_length], self.mention_length
)
sequence_labels = None
labels = None
entity_labels = None
entity_classification_labels = None
entity_pair_classification_labels = None
entity_span_classification_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
entity_labels = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels)
entity_pair_classification_labels = ids_tensor(
[self.batch_size], self.num_entity_pair_classification_labels
)
entity_span_classification_labels = ids_tensor(
[self.batch_size, self.entity_length], self.num_entity_span_classification_labels
)
config = self.get_config()
return (
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
)
def get_config(self):
return LukeConfig(
vocab_size=self.vocab_size,
entity_vocab_size=self.entity_vocab_size,
entity_emb_size=self.entity_emb_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
use_entity_aware_attention=self.use_entity_aware_attention,
)
def create_and_check_model(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
model = LukeModel(config=config)
model.to(torch_device)
model.eval()
# test with words + entities
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(
result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size)
)
# test with words only
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_classification_labels
model = LukeForMaskedLM(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=labels,
entity_labels=entity_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(
result.entity_logits.shape, (self.batch_size, self.entity_length, self.entity_vocab_size)
)
def create_and_check_for_entity_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_classification_labels
model = LukeForEntityClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))
def create_and_check_for_entity_pair_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_pair_classification_labels
        model = LukeForEntityPairClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_pair_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))
def create_and_check_for_entity_span_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_span_classification_labels
model = LukeForEntitySpanClassification(config)
model.to(torch_device)
model.eval()
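        # Span classification additionally needs the start and end word positions of every candidate span.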
entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
entity_start_positions=entity_start_positions,
entity_end_positions=entity_end_positions,
labels=entity_span_classification_labels,
)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
labels,
entity_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"entity_ids": entity_ids,
"entity_token_type_ids": entity_token_type_ids,
"entity_attention_mask": entity_attention_mask,
"entity_position_ids": entity_position_ids,
}
return config, inputs_dict
@require_torch
class LukeModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
LukeModel,
LukeForMaskedLM,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = True
test_head_masking = True
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if model_class == LukeForEntitySpanClassification:
inputs_dict["entity_start_positions"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
)
inputs_dict["entity_end_positions"] = torch.ones(
(self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
)
if return_labels:
if model_class in (LukeForEntityClassification, LukeForEntityPairClassification):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class == LukeForEntitySpanClassification:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.entity_length),
dtype=torch.long,
device=torch_device,
)
elif model_class == LukeForMaskedLM:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length),
dtype=torch.long,
device=torch_device,
)
inputs_dict["entity_labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.entity_length),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def setUp(self):
self.model_tester = LukeModelTester(self)
self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in LUKE_PRETRAINED_MODEL_ARCHIVE_LIST:
model = LukeModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_entity_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_classification(*config_and_inputs)
def test_for_entity_pair_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_pair_classification(*config_and_inputs)
def test_for_entity_span_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_span_classification(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
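# LUKE attends over the concatenation of word and entity tokens, so attention matrices have query/key length seq_length + entity_length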
seq_length = self.model_tester.seq_length
entity_length = self.model_tester.entity_length
key_length = seq_length + entity_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = self.model_tester.num_hidden_states_types
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
)
def test_entity_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
entity_hidden_states = outputs.entity_hidden_states
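# one set of entity hidden states per transformer layer, plus one for the embedding output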
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(entity_hidden_states), expected_num_layers)
entity_length = self.model_tester.entity_length
self.assertListEqual(
list(entity_hidden_states[0].shape[-2:]),
[entity_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_entity_hidden_states(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
entity_hidden_states = outputs.entity_hidden_states[0]
entity_hidden_states.retain_grad()
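# backpropagate from a single output element and check that the gradient reaches the entity hidden states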
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(entity_hidden_states.grad)
@require_torch
class LukeModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_base_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
text = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key, value in encoding.items():
encoding[key] = value.to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 768))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_large_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-large").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification")
text = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key, value in encoding.items():
encoding[key] = value.to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 1024))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 1024))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 26,798 | 38.179825 | 199 | py |
robust-transformers | robust-transformers-main/tests/luke/test_tokenization_luke.py | # coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from os.path import dirname
from typing import Tuple
from transformers import AddedToken, LukeTokenizer
from transformers.testing_utils import require_torch, slow
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/vocab.json")
SAMPLE_MERGE_FILE = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/merges.txt")
SAMPLE_ENTITY_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_entity_vocab.json")
class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LukeTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {"cls_token": "<s>"}
def setUp(self):
super().setUp()
self.special_tokens_map = {"entity_token_1": "<ent>", "entity_token_2": "<ent2>"}
def get_tokenizer(self, task=None, **kwargs):
kwargs.update(self.special_tokens_map)
tokenizer = LukeTokenizer(
vocab_file=SAMPLE_VOCAB,
merges_file=SAMPLE_MERGE_FILE,
entity_vocab_file=SAMPLE_ENTITY_VOCAB,
task=task,
**kwargs,
)
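# make sure every configured special token (including the added entity tokens) is registered in the vocabulary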
tokenizer.sanitize_special_tokens()
return tokenizer
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.get_tokenizer()
text = "lower newer"
bpe_tokens = ["l", "o", "w", "er", "Ġ", "n", "e", "w", "er"]
tokens = tokenizer.tokenize(text) # , add_prefix_space=True)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("studio-ousia/luke-large")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_text_from_decode = tokenizer.encode(
"sequence builders", add_special_tokens=True, add_prefix_space=False
)
encoded_pair_from_decode = tokenizer.encode(
"sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
self.assertEqual(encoded_sentence, encoded_text_from_decode)
self.assertEqual(encoded_pair, encoded_pair_from_decode)
def get_clean_sequence(self, tokenizer, max_length=20) -> Tuple[str, list]:
txt = "Beyonce lives in Los Angeles"
ids = tokenizer.encode(txt, add_special_tokens=False)
return txt, ids
def test_space_encoding(self):
tokenizer = self.get_tokenizer()
sequence = "Encode this sequence."
space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
# Testing encoder arguments
encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(first_char, space_encoding)
encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(first_char, space_encoding)
tokenizer.add_special_tokens({"bos_token": "<s>"})
encoded = tokenizer.encode(sequence, add_special_tokens=True)
first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(first_char, space_encoding)
# Testing spaces after special tokens
mask = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
) # mask token has a left space
mask_ind = tokenizer.convert_tokens_to_ids(mask)
sequence = "Encode <mask> sequence"
sequence_nospace = "Encode <mask>sequence"
encoded = tokenizer.encode(sequence)
mask_loc = encoded.index(mask_ind)
first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(first_char, space_encoding)
encoded = tokenizer.encode(sequence_nospace)
mask_loc = encoded.index(mask_ind)
first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(first_char, space_encoding)
def test_pretokenized_inputs(self):
pass
def test_embeded_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest("{} ({})".format(tokenizer.__class__.__name__, pretrained_name)):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
)
tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
)
def test_padding_entity_inputs(self):
tokenizer = self.get_tokenizer()
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
span = (15, 34)
pad_id = tokenizer.entity_vocab["[PAD]"]
mask_id = tokenizer.entity_vocab["[MASK]"]
encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True)
self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]])
# test with a sentence with no entity
encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True)
self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]])
def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self):
tokenizer = self.get_tokenizer()
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
spans = [(15, 34)]
entities = ["East Asian language"]
with self.assertRaises(ValueError):
tokenizer(sentence, entities=tuple(entities), entity_spans=spans)
with self.assertRaises(ValueError):
tokenizer(sentence, entities=entities, entity_spans=tuple(spans))
with self.assertRaises(ValueError):
tokenizer(sentence, entities=[0], entity_spans=spans)
with self.assertRaises(ValueError):
tokenizer(sentence, entities=entities, entity_spans=[0])
with self.assertRaises(ValueError):
tokenizer(sentence, entities=entities, entity_spans=spans + [(0, 9)])
def test_if_tokenize_entity_classification_raise_error_with_invalid_inputs(self):
tokenizer = self.get_tokenizer(task="entity_classification")
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
span = (15, 34)
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[])
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[span, span])
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[0])
def test_if_tokenize_entity_pair_classification_raise_error_with_invalid_inputs(self):
tokenizer = self.get_tokenizer(task="entity_pair_classification")
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
# head and tail information
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[])
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[0, 0])
def test_if_tokenize_entity_span_classification_raise_error_with_invalid_inputs(self):
tokenizer = self.get_tokenizer(task="entity_span_classification")
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[])
with self.assertRaises(ValueError):
tokenizer(sentence, entity_spans=[0, 0, 0])
@slow
@require_torch
class LukeTokenizerIntegrationTests(unittest.TestCase):
tokenizer_class = LukeTokenizer
from_pretrained_kwargs = {"cls_token": "<s>"}
def setUp(self):
super().setUp()
def test_single_text_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"]
spans = [(9, 21), (30, 38), (39, 42)]
encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic"
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday"
)
self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she")
self.assertEqual(
encoding["entity_ids"],
[
tokenizer.entity_vocab["Ana Ivanovic"],
tokenizer.entity_vocab["Thursday"],
tokenizer.entity_vocab["[UNK]"],
],
)
self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
)
# fmt: on
def test_single_text_only_entity_spans_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
spans = [(9, 21), (30, 38), (39, 42)]
encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic"
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday"
)
self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she")
mask_id = tokenizer.entity_vocab["[MASK]"]
self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id])
self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ],
[9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ]
]
)
# fmt: on
def test_single_text_padding_pytorch_tensors(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"]
spans = [(9, 21), (30, 38), (39, 42)]
encoding = tokenizer(
sentence,
entities=entities,
entity_spans=spans,
return_token_type_ids=True,
padding="max_length",
max_length=30,
max_entity_length=16,
return_tensors="pt",
)
# test words
self.assertEqual(encoding["input_ids"].shape, (1, 30))
self.assertEqual(encoding["attention_mask"].shape, (1, 30))
self.assertEqual(encoding["token_type_ids"].shape, (1, 30))
# test entities
self.assertEqual(encoding["entity_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))
def test_text_pair_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday"
sentence_pair = "She could hardly believe her luck."
entities = ["Ana Ivanovic", "Thursday"]
entities_pair = ["Dummy Entity"]
spans = [(9, 21), (30, 38)]
spans_pair = [(0, 3)]
encoding = tokenizer(
sentence,
sentence_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=spans,
entity_spans_pair=spans_pair,
return_token_type_ids=True,
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic"
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday"
)
self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She")
self.assertEqual(
encoding["entity_ids"],
[
tokenizer.entity_vocab["Ana Ivanovic"],
tokenizer.entity_vocab["Thursday"],
tokenizer.entity_vocab["[UNK]"],
],
)
self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
)
# fmt: on
def test_text_pair_only_entity_spans_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday"
sentence_pair = "She could hardly believe her luck."
spans = [(9, 21), (30, 38)]
spans_pair = [(0, 3)]
encoding = tokenizer(
sentence,
sentence_pair,
entity_spans=spans,
entity_spans_pair=spans_pair,
return_token_type_ids=True,
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic"
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday"
)
self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She")
mask_id = tokenizer.entity_vocab["[MASK]"]
self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id])
self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
)
# fmt: on
def test_text_pair_padding_pytorch_tensors(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)
sentence = "Top seed Ana Ivanovic said on Thursday"
sentence_pair = "She could hardly believe her luck."
entities = ["Ana Ivanovic", "Thursday"]
entities_pair = ["Dummy Entity"]
spans = [(9, 21), (30, 38)]
spans_pair = [(0, 3)]
encoding = tokenizer(
sentence,
sentence_pair,
entities=entities,
entities_pair=entities_pair,
entity_spans=spans,
entity_spans_pair=spans_pair,
return_token_type_ids=True,
padding="max_length",
max_length=30,
max_entity_length=16,
return_tensors="pt",
)
# test words
self.assertEqual(encoding["input_ids"].shape, (1, 30))
self.assertEqual(encoding["attention_mask"].shape, (1, 30))
self.assertEqual(encoding["token_type_ids"].shape, (1, 30))
# test entities
self.assertEqual(encoding["entity_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))
def test_entity_classification_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
span = (39, 42)
encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True)
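# in the entity_classification task the tokenizer surrounds the target mention with <ent> markers, which lengthens the word sequence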
# test words
self.assertEqual(len(encoding["input_ids"]), 42)
self.assertEqual(len(encoding["attention_mask"]), 42)
self.assertEqual(len(encoding["token_type_ids"]), 42)
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday<ent> she<ent> could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][9:12], spaces_between_special_tokens=False), "<ent> she<ent>"
)
# test entities
self.assertEqual(encoding["entity_ids"], [2])
self.assertEqual(encoding["entity_attention_mask"], [1])
self.assertEqual(encoding["entity_token_type_ids"], [0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
]
)
# fmt: on
def test_entity_classification_padding_pytorch_tensors(self):
tokenizer = LukeTokenizer.from_pretrained(
"studio-ousia/luke-base", task="entity_classification", return_token_type_ids=True
)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
# entity information
span = (39, 42)
encoding = tokenizer(
sentence, entity_spans=[span], return_token_type_ids=True, padding="max_length", return_tensors="pt"
)
# test words
self.assertEqual(encoding["input_ids"].shape, (1, 512))
self.assertEqual(encoding["attention_mask"].shape, (1, 512))
self.assertEqual(encoding["token_type_ids"].shape, (1, 512))
# test entities
self.assertEqual(encoding["entity_ids"].shape, (1, 1))
self.assertEqual(encoding["entity_attention_mask"].shape, (1, 1))
self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 1))
self.assertEqual(
encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length)
)
def test_entity_pair_classification_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained(
"studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True
)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
# head and tail information
spans = [(9, 21), (39, 42)]
encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
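# the head mention is wrapped with <ent> markers and the tail mention with <ent2> markers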
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed<ent> Ana Ivanovic<ent> said on Thursday<ent2> she<ent2> could hardly believe her luck.</s>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][3:8], spaces_between_special_tokens=False),
"<ent> Ana Ivanovic<ent>",
)
self.assertEqual(
tokenizer.decode(encoding["input_ids"][11:14], spaces_between_special_tokens=False), "<ent2> she<ent2>"
)
self.assertEqual(encoding["entity_ids"], [2, 3])
self.assertEqual(encoding["entity_attention_mask"], [1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[3, 4, 5, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[11, 12, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
)
# fmt: on
def test_entity_pair_classification_padding_pytorch_tensors(self):
tokenizer = LukeTokenizer.from_pretrained(
"studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True
)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
# head and tail information
spans = [(9, 21), (39, 42)]
encoding = tokenizer(
sentence,
entity_spans=spans,
return_token_type_ids=True,
padding="max_length",
max_length=30,
return_tensors="pt",
)
# test words
self.assertEqual(encoding["input_ids"].shape, (1, 30))
self.assertEqual(encoding["attention_mask"].shape, (1, 30))
self.assertEqual(encoding["token_type_ids"].shape, (1, 30))
# test entities
self.assertEqual(encoding["entity_ids"].shape, (1, 2))
self.assertEqual(encoding["entity_attention_mask"].shape, (1, 2))
self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 2))
self.assertEqual(
encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length)
)
def test_entity_span_classification_no_padding_or_truncation(self):
tokenizer = LukeTokenizer.from_pretrained(
"studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True
)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
spans = [(0, 8), (9, 21), (39, 42)]
encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
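# every span receives the [MASK] entity id; entity_start/end_positions point at the first and last word token of each span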
self.assertEqual(
tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
"<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>",
)
self.assertEqual(encoding["entity_ids"], [2, 2, 2])
self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
# fmt: off
self.assertEqual(
encoding["entity_position_ids"],
[
[1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
]
)
# fmt: on
self.assertEqual(encoding["entity_start_positions"], [1, 3, 9])
self.assertEqual(encoding["entity_end_positions"], [2, 5, 9])
def test_entity_span_classification_padding_pytorch_tensors(self):
tokenizer = LukeTokenizer.from_pretrained(
"studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True
)
sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
spans = [(0, 8), (9, 21), (39, 42)]
encoding = tokenizer(
sentence,
entity_spans=spans,
return_token_type_ids=True,
padding="max_length",
max_length=30,
max_entity_length=16,
return_tensors="pt",
)
# test words
self.assertEqual(encoding["input_ids"].shape, (1, 30))
self.assertEqual(encoding["attention_mask"].shape, (1, 30))
self.assertEqual(encoding["token_type_ids"].shape, (1, 30))
# test entities
self.assertEqual(encoding["entity_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))
self.assertEqual(encoding["entity_start_positions"].shape, (1, 16))
self.assertEqual(encoding["entity_end_positions"].shape, (1, 16))
| 30,244 | 44.549699 | 213 | py |
robust-transformers | robust-transformers-main/tests/vit_mae/test_modeling_vit_mae.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViTMAE model. """
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST, to_2tuple
if is_vision_available():
from PIL import Image
from transformers import ViTFeatureExtractor
class ViTMAEModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ViTMAEConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = ViTMAEModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
expected_seq_len = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
def create_and_check_for_pretraining(self, config, pixel_values, labels):
model = ViTMAEForPreTraining(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected sequence length = num_patches
image_size = to_2tuple(self.image_size)
patch_size = to_2tuple(self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
expected_seq_len = num_patches
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, expected_seq_len, expected_num_channels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ViTMAEModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# ViTMAE does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in ViTMAE, the seq_len equals (number of patches + 1) * (1 - mask_ratio), rounded above
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# ViTMAE has a different seq_length
image_size = to_2tuple(self.model_tester.image_size)
patch_size = to_2tuple(self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
out_2 = outputs[0].cpu().numpy()
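# zero out any NaNs so the element-wise comparison below stays well-defined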
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_determinism(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results."""
)
def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
def test_model_outputs_equivalence(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTMAEModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return ViTFeatureExtractor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def test_inference_for_pretraining(self):
# make random mask reproducible
# note that the same seed on CPU and on GPU doesn’t mean they spew the same random number sequences,
# as they both have fairly different PRNGs (for efficiency reasons).
# source: https://discuss.pytorch.org/t/random-seed-that-spans-across-devices/19735
torch.manual_seed(2)
model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice_cpu = torch.tensor(
[[0.7366, -1.3663, -0.2844], [0.7919, -1.3839, -0.3241], [0.4313, -0.7168, -0.2878]]
)
expected_slice_gpu = torch.tensor(
[[0.8948, -1.0680, 0.0030], [0.9758, -1.1181, -0.0290], [1.0602, -1.1522, -0.0528]]
)
# set expected slice depending on device
expected_slice = expected_slice_cpu if torch_device == "cpu" else expected_slice_gpu
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 17,639 | 39.645161 | 121 | py |
robust-transformers | robust-transformers-main/tests/bort/test_modeling_bort.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModel
@require_torch
@require_sentencepiece
@require_tokenizers
class BortIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = AutoModel.from_pretrained("amazon/bort")
model.to(torch_device)
input_ids = torch.tensor(
[[0, 18077, 4082, 7804, 8606, 6195, 2457, 3321, 11, 10489, 16, 269, 2579, 328, 2]],
device=torch_device,
dtype=torch.long,
) # Schloß Nymphenburg in Munich is really nice!
output = model(input_ids)["last_hidden_state"]
expected_shape = torch.Size((1, 15, 1024))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0349, 0.0436, -1.8654], [-0.6964, 0.0835, -1.7393], [-0.9819, 0.2956, -0.2868]]],
device=torch_device,
dtype=torch.float,
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 1,893 | 35.423077 | 115 | py |
robust-transformers | robust-transformers-main/tests/convnext/test_modeling_convnext.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvNext model. """
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import ConvNextConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoFeatureExtractor
class ConvNextModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
num_channels=3,
num_stages=4,
hidden_sizes=[10, 20, 30, 40],
depths=[2, 2, 3, 2],
is_training=True,
use_labels=True,
intermediate_size=37,
hidden_act="gelu",
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ConvNextConfig(
num_channels=self.num_channels,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
num_stages=self.num_stages,
hidden_act=self.hidden_act,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = ConvNextModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = ConvNextForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ConvNextModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="ConvNext does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Model doesn't have attention layers")
def test_attention_outputs(self):
pass
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
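        # ConvNext has no attention weights, so only gradient retention on the hidden states is exercised here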
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
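        # NaN != NaN, so `t != t` masks exactly the NaN entries; zeroing them keeps the allclose comparison meaningful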
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs=None):
            # avoid a shared mutable default argument
            additional_kwargs = additional_kwargs if additional_kwargs is not None else {}
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvNextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return AutoFeatureExtractor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 13,511 | 38.741176 | 315 | py |
robust-transformers | robust-transformers-main/tests/convnext/test_feature_extraction_convnext.py | # coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConvNextFeatureExtractor
class ConvNextFeatureExtractionTester(unittest.TestCase):
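    # holds the hyper-parameters used to build the feature extractor and the random test images below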
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=20,
crop_pct=0.875,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.crop_pct = crop_pct
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_feat_extract_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_pct": self.crop_pct,
}
@require_torch
@require_vision
class ConvNextFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = ConvNextFeatureExtractor if is_vision_available() else None
def setUp(self):
self.feature_extract_tester = ConvNextFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_properties(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feature_extractor, "do_resize"))
self.assertTrue(hasattr(feature_extractor, "size"))
self.assertTrue(hasattr(feature_extractor, "crop_pct"))
self.assertTrue(hasattr(feature_extractor, "do_normalize"))
self.assertTrue(hasattr(feature_extractor, "image_mean"))
self.assertTrue(hasattr(feature_extractor, "image_std"))
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_numpy(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_pytorch(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
| 6,844 | 33.923469 | 111 | py |
robust-transformers | robust-transformers-main/tests/layoutlmv2/test_feature_extraction_layoutlmv2.py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.file_utils import is_pytesseract_available, is_torch_available
from transformers.testing_utils import require_pytesseract, require_torch
from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv2FeatureExtractor
class LayoutLMv2FeatureExtractionTester(unittest.TestCase):
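    # groups the settings used to instantiate the feature extractor and to create random document images for the tests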
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=18,
apply_ocr=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
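        # apply_ocr=True makes the feature extractor run Tesseract OCR and also return words and normalized boxes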
self.apply_ocr = apply_ocr
def prepare_feat_extract_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = LayoutLMv2FeatureExtractor if is_pytesseract_available() else None
def setUp(self):
self.feature_extract_tester = LayoutLMv2FeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_properties(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feature_extractor, "do_resize"))
self.assertTrue(hasattr(feature_extractor, "size"))
self.assertTrue(hasattr(feature_extractor, "apply_ocr"))
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoding = feature_extractor(image_inputs[0], return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
self.assertIsInstance(encoding.words, list)
self.assertIsInstance(encoding.boxes, list)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_numpy(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_call_pytorch(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
1,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
# Test batched
encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape,
(
self.feature_extract_tester.batch_size,
self.feature_extract_tester.num_channels,
self.feature_extract_tester.size,
self.feature_extract_tester.size,
),
)
def test_layoutlmv2_integration_test(self):
# with apply_OCR = True
feature_extractor = LayoutLMv2FeatureExtractor()
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image = Image.open(ds[0]["file"]).convert("RGB")
encoding = feature_extractor(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, expected_words)
self.assertListEqual(encoding.boxes, expected_boxes)
# with apply_OCR = False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
encoding = feature_extractor(image, return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape,
(
1,
3,
224,
224,
),
)
| 12,929 | 57.243243 | 3,793 | py |
robust-transformers | robust-transformers-main/tests/layoutlmv2/test_processor_layoutlmv2.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from typing import List
from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
from transformers.file_utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available
from transformers.models.layoutlmv2 import LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2Processor
@require_pytesseract
@require_tokenizers
class LayoutLMv2ProcessorTest(unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
def setUp(self):
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
feature_extractor_map = {
"do_resize": True,
"size": 224,
"apply_ocr": True,
}
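        # write a minimal vocab file and feature extractor config to a temporary directory so both can be reloaded with from_pretrained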
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(feature_extractor_map) + "\n")
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
def get_feature_extractor(self, **kwargs):
return LayoutLMv2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
feature_extractor = self.get_feature_extractor()
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = LayoutLMv2Processor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer())
processor.save_pretrained(self.tmpdirname)
# slow tokenizer
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
# fast tokenizer
tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
# different use cases tests
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image_1 = Image.open(ds[0]["file"]).convert("RGB")
image_2 = Image.open(ds[1]["file"]).convert("RGB")
return image_1, image_2
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
feature_extractor = LayoutLMv2FeatureExtractor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
input_feat_extract = feature_extractor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# fmt: off
expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231
# fmt: on
decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_feat_extract = feature_extractor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# fmt: off
expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # noqa: E231
# fmt: on
decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP]"
decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = tokenizer.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] weirdly world [SEP]"
decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] my name is niels [SEP]"
decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
feature_extractor = LayoutLMv2FeatureExtractor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# fmt: off
expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # noqa: E231
# fmt: on
decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
# fmt: off
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # noqa: E231
# fmt: on
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
decoding = tokenizer.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(list(input_processor.keys()))
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = tokenizer.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
decoding = tokenizer.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
| 22,549 | 51.44186 | 1,451 | py |
robust-transformers | robust-transformers-main/tests/layoutlmv2/test_modeling_layoutlmv2.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LayoutLMv2 model. """
import os
import random
import tempfile
import unittest
from transformers.file_utils import is_detectron2_available, is_torch_available
from transformers.testing_utils import require_detectron2, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
LayoutLMv2Config,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Model,
)
from transformers.models.layoutlmv2.modeling_layoutlmv2 import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_detectron2_available():
from detectron2.structures.image_list import ImageList
class LayoutLMv2ModelTester:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=4,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=36,
num_hidden_layers=3,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
image_feature_pool_shape=[7, 7, 256],
coordinate_size=6,
shape_size=6,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
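        # boxes are (x0, y0, x1, y1); swap coordinates where needed so that x0 <= x1 and y0 <= y1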
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
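        # the visual backbone comes from detectron2, so the dummy images are wrapped in a detectron2 ImageList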
image = ImageList(
torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device),
self.image_size,
)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = LayoutLMv2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
image_feature_pool_shape=self.image_feature_pool_shape,
coordinate_size=self.coordinate_size,
shape_size=self.shape_size,
)
# use smaller resnet backbone to make tests faster
config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18
config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64
config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1
return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2Model(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image)
# LayoutLMv2 has a different expected sequence length, namely also visual tokens are added
expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1]
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2ForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
image,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"image": image,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
@require_detectron2
class LayoutLMv2ModelTest(ModelTesterMixin, unittest.TestCase):
test_pruning = False
test_torchscript = False
test_mismatched_shapes = False
all_model_classes = (
(
LayoutLMv2Model,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2ForQuestionAnswering,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = LayoutLMv2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_save_load_fast_init_from_base(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
base_class = MODEL_MAPPING[config.__class__]
if isinstance(base_class, tuple):
base_class = base_class[0]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
# make a copy of model class to not break future tests
# from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class
class CopyClass(model_class):
pass
model_class_copy = CopyClass
# make sure that all keys are expected for test
model_class_copy._keys_to_ignore_on_load_missing = []
# make init deterministic, but make sure that
# non-initialized weights throw errors nevertheless
model_class_copy._init_weights = self._mock_init_weights
model = base_class(config)
state_dict = model.state_dict()
# this will often delete a single weight of a multi-weight module
# to test an edge case
random_key_to_del = random.choice(list(state_dict.keys()))
del state_dict[random_key_to_del]
# check that certain keys didn't get saved with the model
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin"))
model_fast_init = model_class_copy.from_pretrained(tmpdirname)
model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False)
for key in model_fast_init.state_dict().keys():
if key == "layoutlmv2.visual_segment_embedding":
# we skip the visual segment embedding as it has a custom initialization scheme
continue
max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# LayoutLMv2 has a different expected sequence length
expected_seq_len = (
self.model_tester.seq_length
+ self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# LayoutLMv2 has a different expected sequence length
expected_seq_len = (
self.model_tester.seq_length
+ self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[expected_seq_len, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
for model_name in LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LayoutLMv2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
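                # the detectron2 backbone and the visual segment embedding use their own initialization schemes, so they are skipped here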
if "backbone" in name or "visual_segment_embedding" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def prepare_layoutlmv2_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# fmt: on
return input_ids, bbox, image, attention_mask, token_type_ids
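# Illustrative sketch (not used by the tests above): how a single, minimal LayoutLMv2 example
# could be assembled by hand. Boxes are word-level [x0, y0, x1, y1] coordinates normalized to a
# 0-1000 scale, and the image tensor is wrapped in a detectron2-style ImageList, mirroring
# prepare_layoutlmv2_batch_inputs. The helper name and the dummy token ids are assumptions made
# purely for illustration.
def _example_single_layoutlmv2_inputs():
    # [CLS] <one dummy word token> [SEP]; the special tokens get the conventional boxes.
    input_ids = torch.tensor([[101, 1019, 102]])
    bbox = torch.tensor([[[0, 0, 0, 0], [423, 237, 440, 251], [1000, 1000, 1000, 1000]]])
    image = ImageList(torch.randn((1, 3, 224, 224)), image_sizes=[(224, 224)])
    attention_mask = torch.tensor([[1, 1, 1]])
    token_type_ids = torch.tensor([[0, 0, 0]])
    return input_ids, bbox, image, attention_mask, token_type_ids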
@require_torch
@require_detectron2
class LayoutLMv2ModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device)
(
input_ids,
bbox,
image,
attention_mask,
token_type_ids,
) = prepare_layoutlmv2_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids.to(torch_device),
bbox=bbox.to(torch_device),
image=image.to(torch_device),
attention_mask=attention_mask.to(torch_device),
token_type_ids=token_type_ids.to(torch_device),
)
# verify the sequence output
expected_shape = torch.Size(
(
2,
input_ids.shape[1]
+ model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1],
model.config.hidden_size,
)
)
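# With the base checkpoint defaults this works out to roughly (2, 25 + 7 * 7, 768) = (2, 74, 768)
# for the batch prepared above (assuming hidden_size=768 and a 7x7 image feature pool).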
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device
)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
# verify the pooled output
expected_shape = torch.Size((2, model.config.hidden_size))
self.assertEqual(outputs.pooler_output.shape, expected_shape)
| 22,608 | 41.259813 | 917 | py |
robust-transformers | robust-transformers-main/tests/layoutlmv2/test_tokenization_layoutlmv2.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import shutil
import tempfile
import unittest
from typing import List
from transformers import AddedToken, LayoutLMv2TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
VOCAB_FILES_NAMES,
BasicTokenizer,
LayoutLMv2Tokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow
from ..test_tokenization_common import (
SMALL_TRAINING_CORPUS,
TokenizerTesterMixin,
filter_non_english,
merge_model_tokenizer_mappings,
)
@require_tokenizers
@require_pandas
class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_words_and_boxes(self):
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return words, boxes
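# Note on the input format: each word comes with one [x0, y0, x1, y1] box, already normalized to
# a 0-1000 scale. When a word is split into several wordpieces, the LayoutLMv2 tokenizers repeat
# that word's box for every sub-token, e.g. with the toy vocab defined in setUp below,
# "weirdly" -> "weird", "##ly" and both pieces share [427, 272, 441, 287].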
def get_words_and_boxes_batch(self):
words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return words, boxes
def get_question_words_and_boxes(self):
question = "what's his name?"
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_question_words_and_boxes_batch(self):
questions = ["what's his name?", "how is he called?"]
words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return questions, words, boxes
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"what",
"s",
"his",
"name",
"?",
"a",
"weird",
"##ly",
"test",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Hello", "\xad", "hello"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutlmv2-base-uncased")
question, words, boxes = self.get_question_words_and_boxes()
text = tokenizer.encode(
question.split(),
boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
add_special_tokens=False,
)
text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == [101] + text + [102] + text_2 + [102]
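# In the standard bert-base-uncased vocabulary reused by LayoutLMv2, id 101 is [CLS] and id 102
# is [SEP], so the pair encoding asserted above follows the familiar
# [CLS] question [SEP] words [SEP] layout.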
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
words[1] = tokenizer_r.mask_token
tokens = tokenizer_r.encode_plus(
words,
boxes=boxes,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
expected_results = [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((0, 6), tokenizer_r.mask_token),
((0, 4), "test"),
((0, 0), tokenizer_r.sep_token),
]
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(
words,
boxes=boxes,
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
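# Note: vocab_size only counts the original (fixture) vocabulary, which is why it stays constant
# above, while len(tokenizer) also includes tokens registered via add_tokens / add_special_tokens.
# Newly added tokens are appended after the original vocabulary, which is what the
# tokens[0] / tokens[-2] > vocab_size - 1 checks rely on.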
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
tokens = []
for word in words:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
output_text = "a weirdly test"
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
# test 2: two sequences
question, words, boxes = self.get_question_words_and_boxes()
sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
# FIXME: the next line should be padding(max_length) to avoid warning
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_padding(self, max_length=50):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
# Encode - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
questions, words, boxes = self.get_question_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
# Tests that all calls wrap to encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Test not batched
words, boxes = self.get_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
question, words, boxes = self.get_question_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences = [
tokenizer.encode_plus(words_example, boxes=boxes_example)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences_padded = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
# check 'longest' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
# check 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=False
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
words, boxes = self.get_words_and_boxes()
# empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
# for key, value in empty_tokens.items():
# self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is unexpectedly a multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
words,
boxes=boxes,
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
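# The ValueError above is expected because max_length=12 is not itself a multiple of
# pad_to_multiple_of=8: with truncation active the tokenizer cannot both cap the length at 12
# and pad it out to a multiple of 8, so it rejects the combination.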
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Input tokens id
words, boxes = self.get_words_and_boxes()
input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# Testing single inputs
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
# safety check on the model_max_length default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
words, boxes = self.get_words_and_boxes()
tmpdirname = tempfile.mkdtemp()
before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
# test 2: two sequences (question + words)
question, words, boxes = self.get_question_words_and_boxes()
output = tokenizer(question, words, boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertIn(1, output["token_type_ids"])
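# For question/words pairs, LayoutLMv2 follows the BERT convention: tokens of the first sequence
# (the question) get token_type_id 0 and tokens of the second sequence (the document words) get
# token_type_id 1, which is exactly what the two assertIn checks above verify.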
def test_offsets_mapping(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(text))]
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are exactly added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
text = "what's his name"
pair = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
tokens_with_offsets = tokenizer_r.encode_plus(
text,
pair,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are exactly added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(
[words, words], boxes=[boxes, boxes], return_tensors="pt"
)
# We add dummy image keys (as LayoutLMv2 actually also requires a feature extractor
# to prepare the image input)
encoded_sequence["image"] = torch.randn(1, 3, 224, 224)
batch_encoded_sequence["image"] = torch.randn(2, 3, 224, 224)
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
words, boxes = self.get_words_and_boxes()
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenization_python_rust_equals(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
# Ensure basic input match
input_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
words = ["hello" for _ in range(1000)]
boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]
# Ensure truncation match
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
tokens_r = tokenizer_r.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
words, boxes = self.get_words_and_boxes()
# tokenize()
no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode()
no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
# batch_encode_plus()
words, boxes = self.get_words_and_boxes_batch()
no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
@slow
def test_layoutlmv2_truncation_integration_test(self):
words, boxes = self.get_words_and_boxes()
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased", model_max_length=512)
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)
# Ensure that the number of input IDs does not exceed the max length defined.
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
# Ensure that the input IDs are still truncated when no max_length is specified
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
# A tensor cannot be built from sequences that are not the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
words, boxes=boxes, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = ["With", "these", "inputs."]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]
# We want sequence 0 and sequence 1 to be tagged
# with sequence ids 0 and 1 respectively
# (regardless of whether the model uses token type ids)
# The QA pipeline, among other places, relies on this assumption
output = tokenizer(seq_0.split(), boxes=boxes)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1, boxes=boxes)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
r_output = tokenizer_r.encode(words, boxes=boxes)
special_token_id = tokenizer_r.encode(
["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
p_output = tokenizer_p.encode(words, boxes=boxes)
cr_output = tokenizer_cr.encode(words, boxes=boxes)
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
# Test we can use the new tokenizer with something not seen during training
text = [["this", "is", "the"], ["how", "are", "you"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
inputs = new_tokenizer(text, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is the"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
# Assert the set of special tokens match as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
self.assertTrue(
find,
(
f"'{new_special_token_str}' doesn't appear in the list "
f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}"
),
)
elif special_token not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
# The special token must appear in the list of the new tokenizer as an object of type string.
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
# Test we can use the new tokenizer with something not seen during training
words = [["this", "is"], ["hello", "🤗"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
inputs = new_tokenizer(words, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
# only test prepare_for_model for the slow tokenizer
if tokenizer.__class__.__name__ == "LayoutLMv2TokenizerFast":
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)
input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes)
# rename encoded batch to "inputs"
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
# Renaming `input_ids` to `inputs`
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
                input_p = tokenizer_p.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_batch_encode_dynamic_overflowing(self):
"""
        When calling batch_encode with multiple sequences, it can return a different number of
        overflowing encodings for each sequence:
        [
          Sequence 1: [Encoding 1, Encoding 2],
          Sequence 2: [Encoding 1],
          Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
        ]
        This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
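    # A minimal, hedged sketch (not part of the original suite) of the ragged-overflow behaviour
    # described in the docstring above: every overflowing chunk keeps a pointer back to the sample
    # it came from, so the ragged structure can be flattened and padded into one rectangular batch.
    # The checkpoint name and the toy inputs below are illustrative assumptions only, hence the skip.
    @unittest.skip("Illustrative sketch only; requires downloading a checkpoint.")
    def test_overflow_to_sample_mapping_sketch(self):
        tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
        words = [["hello"] * 10, ["world"] * 3]
        boxes = [[[1, 1, 1, 1]] * 10, [[1, 1, 1, 1]] * 3]
        encoding = tokenizer(
            words,
            boxes=boxes,
            max_length=6,
            truncation=True,
            padding=True,
            return_overflowing_tokens=True,
        )
        # One mapping entry per flattened chunk: chunks from sample 0 come first, then sample 1.
        self.assertEqual(len(encoding["overflow_to_sample_mapping"]), len(encoding["input_ids"]))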
@unittest.skip("TO DO: overwrite this very extensive test.")
def test_alignement_methods(self):
pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
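        """
        Build a short, clean pretokenized sequence from the tokenizer's own vocabulary: keep only
        tokens that decode to plain alphabetic words and round-trip to a single id, attach a dummy
        bounding box to each word, and return the words, the boxes and the matching encoded ids.
        """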
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(
filter(
lambda t: [t[0]]
== tokenizer.encode(t[1].split(" "), boxes=len(t[1]) * [[1, 1, 1, 1]], add_special_tokens=False),
toks,
)
)
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
words = output_txt.split(" ")
boxes = [[i, i, i, i] for i in range(len(words))]
output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
return words, boxes, output_ids
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Build a sequence from our model's vocabulary
stride = 2
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
question_0 = " ".join(map(str, seq_0))
if len(ids) <= 2 + stride:
seq_0 = (seq_0 + " ") * (2 + stride)
ids = None
seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride)
question_1 = "This is another sentence to be encoded."
seq_1 = ["what", "a", "weird", "test", "weirdly", "weird"]
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2:
seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"]
seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
seq_1 = seq_1.split(" ")
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride)
smallest = (
seq1_tokens["input_ids"]
if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
else seq0_tokens["input_ids"]
)
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer(
question_0, seq_1, boxes=boxes_1, add_special_tokens=False
) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
question_2 = " ".join(map(str, seq_2))
boxes_2 = boxes_0 * model_max_length
self.assertGreater(len(seq_2), model_max_length)
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.")
self.assertGreater(
total_length2, model_max_length, "Issue with the testing sequence, please update it."
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(
question_2,
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_2],
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
truncated_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][:-2]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
truncated_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][:-2]
)
truncated_longest_sequence = (
truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
)
overflow_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][-(2 + stride) :]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
overflow_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][-(2 + stride) :]
)
overflow_longest_sequence = (
overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
)
bbox_first = [[0, 0, 0, 0]] * (len(seq_0) - 2)
bbox_first_sequence = bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"]
overflowing_token_bbox_first_sequence_slow = [[0, 0, 0, 0]] * (2 + stride)
overflowing_token_bbox_first_sequence_fast = [[0, 0, 0, 0]] * (2 + stride) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"]
bbox_second = [[0, 0, 0, 0]] * len(seq_0)
bbox_second_sequence = (
bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"][:-2]
)
overflowing_token_bbox_second_sequence_slow = tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
overflowing_token_bbox_second_sequence_fast = [[0, 0, 0, 0]] * len(seq_0) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
bbox_longest_sequence = (
bbox_first_sequence if len(seq0_tokens) > len(seq1_tokens) else bbox_second_sequence
)
overflowing_token_bbox_longest_sequence_fast = (
overflowing_token_bbox_first_sequence_fast
if len(seq0_tokens) > len(seq1_tokens)
else overflowing_token_bbox_second_sequence_fast
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
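                # (a fast tokenizer returns the overflowing chunk as an extra row of "input_ids"/"bbox",
                # while a slow tokenizer returns separate "overflowing_tokens"/"overflowing_token_boxes" keys)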
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(len(overflowing_bbox), 2 + stride + len(smallest))
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
bbox = information_first_truncated["bbox"][0]
overflowing_bbox = information_first_truncated["bbox"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
overflowing_bbox = information_first_truncated["overflowing_token_boxes"]
bbox = information_first_truncated["bbox"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow)
information_second_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
bbox = information_second_truncated["bbox"][0]
overflowing_bbox = information_second_truncated["bbox"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
bbox = information_second_truncated["bbox"]
overflowing_bbox = information_second_truncated["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow)
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
total_length = len(sequence["input_ids"])
                self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
boxes_1 = boxes_0 * model_max_length
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
self.assertGreater(
                    total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short"
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
stride = 2
information = tokenizer(
seq_0,
boxes=boxes_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
bbox = information["bbox"]
overflowing_bbox = information["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
@unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
pass
@slow
def test_only_label_first_subword(self):
words = ["hello", "niels"]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
# test slow tokenizer
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
# test fast tokenizer
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
        tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
@slow
def test_layoutlmv2_integration_test(self):
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only words and normalized bounding boxes are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
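        # For reference, the three call signatures exercised below look roughly like this
        # (argument values aside, nothing here is asserted):
        #   tokenizer(words, boxes=boxes)                            # CASE 1
        #   tokenizer(words, boxes=boxes, word_labels=word_labels)   # CASE 2
        #   tokenizer(question, words, boxes)                        # CASE 3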
# CASE 1: not batched
words, boxes = self.get_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 1: batched
words, boxes = self.get_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: not batched
words, boxes = self.get_words_and_boxes()
word_labels = [1, 2, 3]
# fmt: off
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: batched
words, boxes = self.get_words_and_boxes_batch()
word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
# fmt: off
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: not batched
question, words, boxes = self.get_question_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: batched
questions, words, boxes = self.get_question_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
| 126,856 | 51.312165 | 1,398 | py |
robust-transformers | robust-transformers-main/tests/layoutlm/test_modeling_layoutlm.py | # coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LayoutLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
)
class LayoutLMModelTester:
"""You can also import this e.g from .test_modeling_layoutlm import LayoutLMModelTester"""
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
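        # (i.e. enforce x0 <= x1 and y0 <= y1 by swapping any coordinates that are out of order)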
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return LayoutLMConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_model(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LayoutLMModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LayoutLMForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LayoutLMForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LayoutLMForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
LayoutLMModel,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
)
if is_torch_available()
        else ()
)
def setUp(self):
self.model_tester = LayoutLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],],device=torch_device) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device) # noqa: E231
    # these are token labels for sequence labeling (i.e. one label per token)
labels = torch.tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]],device=torch_device) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_torch
class LayoutLMModelIntegrationTest(unittest.TestCase):
@slow
def test_forward_pass_no_head(self):
model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
# test the sequence output on [0, :3, :3]
expected_slice = torch.tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
# test the pooled output on [1, :3]
expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device)
self.assertTrue(torch.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
@slow
def test_forward_pass_sequence_classification(self):
# initialize model with randomly initialized sequence classification head
model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=torch.tensor([1, 1], device=torch_device),
)
# test whether we get a loss as a scalar
loss = outputs.loss
expected_shape = torch.Size([])
self.assertEqual(loss.shape, expected_shape)
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 2))
self.assertEqual(logits.shape, expected_shape)
@slow
def test_forward_pass_token_classification(self):
# initialize model with randomly initialized token classification head
model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
# test the loss calculation to be around 2.65
# expected_loss = torch.tensor(2.65, device=torch_device)
        # The loss currently varies somewhat randomly between runs, by an atol of roughly 0.1-0.3.
# self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=0.1))
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 25, 13))
self.assertEqual(logits.shape, expected_shape)
| 14,920 | 43.014749 | 937 | py |
robust-transformers | robust-transformers-main/tests/deberta_v2/test_modeling_deberta_v2.py | # coding=utf-8
# Copyright 2018 Microsoft Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
DebertaV2ForMaskedLM,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
)
from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
relative_attention=False,
position_biased_input=True,
pos_att_type="None",
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return DebertaV2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
pos_att_type=self.pos_att_type,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size()), [])
def create_and_check_deberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaV2Model(config=config)
model.to(torch_device)
model.eval()
sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids)[0]
self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
def create_and_check_deberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaV2ForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_deberta_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaV2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_deberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaV2ForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_deberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaV2ForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
DebertaV2Model,
DebertaV2ForMaskedLM,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2ForQuestionAnswering,
)
if is_torch_available()
else ()
)
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = DebertaV2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_deberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaV2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
output = model(input_ids, attention_mask=attention_mask)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
)
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 11,432 | 39.257042 | 119 | py |
robust-transformers | robust-transformers-main/tests/wavlm/test_modeling_wavlm.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch WavLM model. """
import math
import unittest
import pytest
from datasets import load_dataset
from transformers import WavLMConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
if is_torch_available():
import torch
from transformers import (
Wav2Vec2FeatureExtractor,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
)
class WavLMModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=16,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=4,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=False,
tdnn_dim=(32, 32),
tdnn_kernel=(3, 3),
tdnn_dilation=(1, 1),
xvector_output_dim=32,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.tdnn_dim = tdnn_dim
self.tdnn_kernel = tdnn_kernel
self.tdnn_dilation = tdnn_dilation
self.xvector_output_dim = xvector_output_dim
self.scope = scope
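        # derive the expected encoder sequence length: every conv layer removes (kernel - 1)
        # frames and downsamples the remainder by its stride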
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return WavLMConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
tdnn_dim=self.tdnn_dim,
tdnn_kernel=self.tdnn_kernel,
tdnn_dilation=self.tdnn_dilation,
xvector_output_dim=self.xvector_output_dim,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = WavLMModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = WavLMModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
def check_ctc_loss(self, config, input_values, *args):
model = WavLMForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_seq_classifier_loss(self, config, input_values, *args):
model = WavLMForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = WavLMForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = WavLMForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = WavLMForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class WavLMModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(WavLMForCTC, WavLMModel, WavLMForAudioFrameClassification, WavLMForSequenceClassification, WavLMForXVector)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = WavLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=WavLMConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# WavLM has no inputs_embeds
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
def test_forward_signature(self):
pass
# WavLM cannot resize token embeddings
# since it has no tokens embeddings
def test_resize_tokens_embeddings(self):
pass
# WavLM has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
def test_model_common_attributes(self):
pass
# WavLM uses PyTorch's multi-head-attention class
# and thus can't retain gradients on attentions
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
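                # parameters listed below use a uniform initialization, so their mean may fall
                # anywhere in [-1, 1]; all other parameters must come out as exactly 0.0 or 1.0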
uniform_init_parms = [
"conv.weight",
"masked_spec_embed",
"codevectors",
"quantizer.weight_proj.weight",
"project_hid.weight",
"project_hid.bias",
"project_q.weight",
"project_q.bias",
"feature_projection.projection.weight",
"feature_projection.projection.bias",
"label_embeddings_concat",
"rel_attn_embed",
"objective.weight",
]
if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "codevectors") and module.codevectors is not None:
module.codevectors.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented for WavLM")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus")
self.assertIsNotNone(model)
@require_torch
@require_torchaudio
@slow
class WavLMModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _load_superb(self, task, num_samples):
ds = load_dataset("anton-l/superb_dummy", task, split="test")
return ds[:num_samples]
def test_inference_base(self):
model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus").to(torch_device)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"microsoft/wavlm-base-plus", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs = feature_extractor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
with torch.no_grad():
hidden_states_slice = (
model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu()
)
EXPECTED_HIDDEN_STATES_SLICE = torch.tensor(
[[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]]
)
# TODO: update the tolerance after the CI moves to torch 1.10
self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2))
def test_inference_large(self):
model = WavLMModel.from_pretrained("microsoft/wavlm-large").to(torch_device)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"microsoft/wavlm-large", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs = feature_extractor(input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
with torch.no_grad():
hidden_states_slice = (
model(input_values, attention_mask=attention_mask).last_hidden_state[:, -2:, -2:].cpu()
)
EXPECTED_HIDDEN_STATES_SLICE = torch.tensor(
[[[0.2122, 0.0500], [0.2118, 0.0563]], [[0.1353, 0.1818], [0.2453, 0.0595]]]
)
self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, rtol=5e-2))
def test_inference_diarization(self):
model = WavLMForAudioFrameClassification.from_pretrained("microsoft/wavlm-base-plus-sd").to(torch_device)
processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sd")
input_data = self._load_superb("sd", 4)
inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000)
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
with torch.no_grad():
outputs = model(input_values, attention_mask=attention_mask)
# labels is a one-hot array of shape (num_frames, num_speakers)
labels = (outputs.logits > 0).long()
# s3prl logits for the same batch
expected_logits = torch.tensor(
[
[[-5.9566, -8.6554], [-5.7137, -8.9386], [-5.7906, -7.0973], [-5.7829, -5.9999]],
[[-5.2086, -7.7878], [-4.8890, -7.9312], [-4.2004, -3.9101], [-5.4480, -4.6932]],
[[-4.6105, -6.7178], [-5.1930, -6.1635], [-2.6228, -4.1123], [-2.7646, -3.1576]],
[[-4.4477, -7.9206], [-3.9339, -7.3707], [-4.9528, -4.8242], [-3.6921, -2.9687]],
],
device=torch_device,
)
self.assertEqual(labels[0, :, 0].sum(), 258)
self.assertEqual(labels[0, :, 1].sum(), 647)
# TODO: update the tolerance after the CI moves to torch 1.10
self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))
def test_inference_speaker_verification(self):
model = WavLMForXVector.from_pretrained("microsoft/wavlm-base-plus-sv").to(torch_device)
processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/wavlm-base-plus-sv")
input_data = self._load_superb("si", 4)
inputs = processor(input_data["speech"], return_tensors="pt", padding=True)
labels = torch.tensor([5, 1, 1, 3], device=torch_device).T
with torch.no_grad():
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
outputs = model(input_values, attention_mask=attention_mask, labels=labels)
embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1)
cosine_sim = torch.nn.CosineSimilarity(dim=-1)
# id10002 vs id10002
self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9787, 3)
# id10006 vs id10002
self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.5064, 3)
# id10002 vs id10004
self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.4780, 3)
# TODO: update the tolerance after the CI moves to torch 1.10
self.assertAlmostEqual(outputs.loss.item(), 18.4154, 2)
| 23,759 | 39.477002 | 116 | py |
robust-transformers | robust-transformers-main/tests/marian/test_modeling_marian.py | # coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Marian model. """
import tempfile
import unittest
from huggingface_hub.hf_api import list_models
from transformers import MarianConfig, is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
MarianModel,
MarianMTModel,
TranslationPipeline,
)
from transformers.models.marian.convert_marian_to_pytorch import (
ORG_NAME,
convert_hf_name_to_opus_name,
convert_opus_name_to_hf_name,
)
from transformers.models.marian.modeling_marian import (
MarianDecoder,
MarianEncoder,
MarianForCausalLM,
shift_tokens_right,
)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MarianModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
decoder_start_token_id=3,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.decoder_start_token_id = decoder_start_token_id
def prepare_config_and_inputs(self):
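        # keep all ids >= 3 so that pad (1), eos (2) and bos (0) only appear where set explicitly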
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MarianConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
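        # run the decoder once with use_cache=True, then feed extra tokens together with the
        # cached past_key_values and check the result matches a full forward pass on the
        # concatenated sequence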
model = MarianModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and next attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MarianModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MarianModel, MarianMTModel) if is_torch_available() else ()
all_generative_model_classes = (MarianMTModel,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_missing_keys = False
def setUp(self):
self.model_tester = MarianModelTester(self)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MarianMTModel(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
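        # deliberately fall through to the except branch below to build a detailed error message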
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
class ModelManagementTests(unittest.TestCase):
@slow
@require_torch
def test_model_names(self):
model_list = list_models()
model_ids = [x.modelId for x in model_list if x.modelId.startswith(ORG_NAME)]
        bad_model_ids = [mid for mid in model_ids if "+" in mid]
self.assertListEqual([], bad_model_ids)
self.assertGreater(len(model_ids), 500)
@require_torch
@require_sentencepiece
@require_tokenizers
class MarianIntegrationTest(unittest.TestCase):
src = "en"
tgt = "de"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
"Tom asked his teacher for advice.",
"That's how I would do it.",
"Tom really admired Mary's courage.",
"Turn around and close your eyes.",
]
expected_text = [
"Ich bin ein kleiner Frosch.",
"Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.",
"Tom bat seinen Lehrer um Rat.",
"So würde ich das machen.",
"Tom bewunderte Marias Mut wirklich.",
"Drehen Sie sich um und schließen Sie die Augen.",
]
    # ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) "" -> "O", (3) tun -> machen
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
if torch_device == "cuda":
return model.half()
else:
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to(
torch_device
)
self.assertEqual(self.model.device, model_inputs.input_ids.device)
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128
)
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_DE_More(MarianIntegrationTest):
@slow
def test_forward(self):
src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."]
expected_ids = [38, 121, 14, 697, 38848, 0]
model_inputs = self.tokenizer(src, return_tensors="pt").to(torch_device)
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(tgt, return_tensors="pt")
model_inputs["labels"] = targets["input_ids"].to(torch_device)
self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist())
desired_keys = {
"input_ids",
"attention_mask",
"labels",
}
self.assertSetEqual(desired_keys, set(model_inputs.keys()))
model_inputs["decoder_input_ids"] = shift_tokens_right(
model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id
)
model_inputs["return_dict"] = True
model_inputs["use_cache"] = False
with torch.no_grad():
outputs = self.model(**model_inputs)
max_indices = outputs.logits.argmax(-1)
self.tokenizer.batch_decode(max_indices)
def test_unk_support(self):
t = self.tokenizer
ids = t(["||"], return_tensors="pt").to(torch_device).input_ids[0].tolist()
expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id]
self.assertEqual(expected, ids)
def test_pad_not_split(self):
input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist()
expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad
self.assertListEqual(expected_w_pad, input_ids_w_pad)
@slow
def test_batch_generation_en_de(self):
self._assert_generated_batch_equal_expected()
def test_auto_config(self):
config = AutoConfig.from_pretrained(self.model_name)
self.assertIsInstance(config, MarianConfig)
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_FR(MarianIntegrationTest):
src = "en"
tgt = "fr"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
]
expected_text = [
"Je suis une petite grenouille.",
"Maintenant, je peux oublier les 100 mots d'allemand que je connais.",
]
@slow
def test_batch_generation_en_fr(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_FR_EN(MarianIntegrationTest):
src = "fr"
tgt = "en"
src_text = [
"Donnez moi le micro.",
"Tom et Mary étaient assis à une table.", # Accents
]
expected_text = [
"Give me the microphone.",
"Tom and Mary were sitting at a table.",
]
@slow
def test_batch_generation_fr_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_RU_FR(MarianIntegrationTest):
src = "ru"
tgt = "fr"
src_text = ["Он показал мне рукопись своей новой пьесы."]
expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."]
@slow
def test_batch_generation_ru_fr(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_MT_EN(MarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks without adjust_logits_generation overwritten"""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_zh(MarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_eng_zho(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
class TestMarian_en_ROMANCE(MarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@slow
def test_pipeline(self):
device = 0 if torch_device == "cuda" else -1
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=device)
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
@require_torch
class TestConversionUtils(unittest.TestCase):
def test_renaming_multilingual(self):
old_names = [
"opus-mt-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"opus-mt-cmn+cn-fi", # no group
"opus-mt-en-de", # standard name
"opus-mt-en-de", # standard name
]
expected = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"]
self.assertListEqual(expected, [convert_opus_name_to_hf_name(x) for x in old_names])
def test_undoing_renaming(self):
hf_names = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"]
converted_opus_names = [convert_hf_name_to_opus_name(x) for x in hf_names]
expected_opus_names = [
"cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"cmn+cn-fi",
"en-de", # standard name
"en-de",
]
self.assertListEqual(expected_opus_names, converted_opus_names)
class MarianStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=4,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = MarianConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = MarianDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = MarianDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class MarianStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MarianDecoder, MarianForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (MarianForCausalLM,) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = MarianStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
| 29,030 | 36.604922 | 123 | py |
robust-transformers | robust-transformers-main/tests/marian/test_modeling_flax_marian.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import MarianConfig, is_flax_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import MarianTokenizer
from transformers.models.marian.modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, shift_tokens_right
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
if decoder_attention_mask is None:
decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
if head_mask is None:
head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxMarianModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=32,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
initializer_range=0.02,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
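        # build ids with no special tokens, then force an eos token (id 2) at the end of each sequence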
input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
config = MarianConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
initializer_range=self.initializer_range,
use_cache=False,
)
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def check_use_cache_forward(self, model_class_name, config, inputs_dict):
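        # decode incrementally with init_cache / past_key_values and check that the final-token
        # output matches a single full decoder forward pass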
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"])
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
outputs_cache = model.decode(
decoder_input_ids[:, :-1],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
decoder_position_ids=decoder_position_ids,
)
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=outputs_cache.past_key_values,
decoder_position_ids=decoder_position_ids,
)
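        # Reference: one uncached pass over the full sequence; the logits at the last position
        # should match the cached two-step decode within a small tolerance.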
outputs = model.decode(decoder_input_ids, encoder_outputs)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"])
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
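        # Pad the decoder attention mask with zeros up to max_decoder_length so that cache
        # positions beyond the real sequence stay masked out.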
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
],
axis=-1,
)
past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
outputs_cache = model.decode(
decoder_input_ids[:, :-1],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask_cache,
past_key_values=past_key_values,
decoder_position_ids=decoder_position_ids,
)
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:],
encoder_outputs,
past_key_values=outputs_cache.past_key_values,
decoder_attention_mask=decoder_attention_mask_cache,
decoder_position_ids=decoder_position_ids,
)
outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxMarianModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
is_encoder_decoder = True
all_model_classes = (FlaxMarianModel, FlaxMarianMTModel) if is_flax_available() else ()
all_generative_model_classes = (FlaxMarianMTModel,) if is_flax_available() else ()
def setUp(self):
self.model_tester = FlaxMarianModelTester(self)
def test_use_cache_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
def test_use_cache_forward_with_attn_mask(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def test_encode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def encode_jitted(input_ids, attention_mask=None, **kwargs):
return model.encode(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest("JIT Enabled"):
jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_decode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
prepared_inputs_dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
return model.decode(
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
)
with self.subTest("JIT Enabled"):
jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("Helsinki-NLP/opus-mt-en-de")
            # a single eos token is enough for a minimal forward pass with both Marian model classes
input_ids = np.ones((1, 1)) * model.config.eos_token_id
outputs = model(input_ids)
self.assertIsNotNone(outputs)
@require_flax
@require_sentencepiece
@require_tokenizers
class MarianIntegrationTest(unittest.TestCase):
src = None
tgt = None
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self):
return MarianTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
model: FlaxMarianMTModel = FlaxMarianMTModel.from_pretrained(self.model_name)
self.assertEqual(model.config.decoder_start_token_id, model.config.pad_token_id)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="np", **tokenizer_kwargs)
generated_ids = self.model.generate(
model_inputs.input_ids,
attention_mask=model_inputs.attention_mask,
num_beams=2,
max_length=128,
).sequences
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_FR(MarianIntegrationTest):
src = "en"
tgt = "fr"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
]
expected_text = [
"Je suis une petite grenouille.",
"Maintenant, je peux oublier les 100 mots d'allemand que je connais.",
]
@slow
def test_batch_generation_en_fr(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_FR_EN(MarianIntegrationTest):
src = "fr"
tgt = "en"
src_text = [
"Donnez moi le micro.",
"Tom et Mary étaient assis à une table.", # Accents
]
expected_text = [
"Give me the microphone.",
"Tom and Mary were sitting at a table.",
]
@slow
def test_batch_generation_fr_en(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_MT_EN(MarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks without adjust_logits_generation overwritten"""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_EN_DE(MarianIntegrationTest):
src = "en"
tgt = "de"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
"Tom asked his teacher for advice.",
"That's how I would do it.",
"Tom really admired Mary's courage.",
"Turn around and close your eyes.",
]
expected_text = [
"Ich bin ein kleiner Frosch.",
"Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.",
"Tom bat seinen Lehrer um Rat.",
"So würde ich das machen.",
"Tom bewunderte Marias Mut wirklich.",
"Drehen Sie sich um und schließen Sie die Augen.",
]
@slow
def test_batch_generation_en_de(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_en_zh(MarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@slow
def test_batch_generation_eng_zho(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_RU_FR(MarianIntegrationTest):
src = "ru"
tgt = "fr"
src_text = ["Он показал мне рукопись своей новой пьесы."]
expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."]
@slow
def test_batch_generation_ru_fr(self):
self._assert_generated_batch_equal_expected()
@require_flax
@require_sentencepiece
@require_tokenizers
class TestMarian_en_ROMANCE(MarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
| 18,340 | 36.430612 | 118 | py |
robust-transformers | robust-transformers-main/tests/marian/test_tokenization_marian.py | # coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from os.path import dirname
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.file_utils import is_sentencepiece_available, is_tf_available, is_torch_available
from transformers.testing_utils import require_sentencepiece, slow
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
FRAMEWORK = "pt"
elif is_tf_available():
FRAMEWORK = "tf"
else:
FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MarianTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def setUp(self):
super().setUp()
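        # Build a tiny vocabulary and reuse the same sample SentencePiece model for both the
        # source and the target side, then round-trip the tokenizer through save_pretrained.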
vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
save_dir = Path(self.tmpdirname)
save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "</s>"
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "</s>")
self.assertEqual(vocab_keys[1], "<unk>")
self.assertEqual(vocab_keys[-1], "<pad>")
self.assertEqual(len(vocab_keys), 9)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 9)
def test_tokenizer_equivalence_en_de(self):
en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
self.assertIsInstance(batch, BatchEncoding)
expected = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(expected, batch.input_ids[0])
save_dir = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(save_dir)
contents = [x.name for x in Path(save_dir).glob("*")]
self.assertIn("source.spm", contents)
MarianTokenizer.from_pretrained(save_dir)
def test_outputs_not_longer_than_maxlen(self):
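        # The heavily repeated sentence must be truncated down to the tokenizer's model max length (512).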
tok = self.get_tokenizer()
batch = tok(
["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual(batch.input_ids.shape, (2, 512))
def test_outputs_can_be_shorter(self):
tok = self.get_tokenizer()
batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
self.assertIsInstance(batch_smaller, BatchEncoding)
self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def test_tokenizer_integration(self):
# fmt: off
expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="Helsinki-NLP/opus-mt-en-de",
revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
decode_kwargs={"use_source_tokenizer": True},
)
| 7,926 | 56.861314 | 2,878 | py |
robust-transformers | robust-transformers-main/tests/marian/test_modeling_tf_marian.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import warnings
from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel
@require_tf
class TFMarianModelTester:
config_cls = MarianConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_ids=[2],
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFMarianModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_marian_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
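    # Any mask that is not supplied explicitly is derived here: padding positions are masked out
    # in the attention masks, and all attention heads are left enabled in the head masks.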
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else ()
all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def setUp(self):
self.model_tester = TFMarianModelTester(self)
self.config_tester = ConfigTester(self, config_class=MarianConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
model_class = self.all_generative_model_classes[0]
input_ids = {
"decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
"input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
}
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pre-trained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_saved_model_creation(self):
        # This test is too long (>30sec) and makes the CI fail
pass
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
                # Build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once they are built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
                # resize the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
if len(prefix) > 0:
prefix = f"{prefix}: "
raise AssertionError(f"{prefix}{a} != {b}")
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class AbstractMarianIntegrationTest(unittest.TestCase):
maxDiff = 1000 # show more chars for failing integration tests
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
return cls
@cached_property
def tokenizer(self) -> MarianTokenizer:
return AutoTokenizer.from_pretrained(self.model_name)
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@cached_property
def model(self):
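        # Turn warnings into errors so that any warning raised while loading the checkpoint fails the test.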
warnings.simplefilter("error")
model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
assert isinstance(model, TFMarianMTModel)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128
)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_MT_EN(AbstractMarianIntegrationTest):
"""Cover low resource/high perplexity setting. This breaks if pad_token_id logits not set to LARGE_NEGATIVE."""
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@unittest.skip("Skipping until #12647 is resolved.")
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_zh(AbstractMarianIntegrationTest):
src = "en"
tgt = "zh"
src_text = ["My name is Wolfgang and I live in Berlin"]
expected_text = ["我叫沃尔夫冈 我住在柏林"]
@unittest.skip("Skipping until #12647 is resolved.")
@slow
def test_batch_generation_en_zh(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
@require_tf
class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@unittest.skip("Skipping until #12647 is resolved.")
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
@unittest.skip("Skipping until #12647 is resolved.")
@slow
def test_pipeline(self):
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf")
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
| 17,741 | 39.506849 | 117 | py |
robust-transformers | robust-transformers-main/tests/flaubert/test_modeling_flaubert.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_lengths = True
self.use_token_type_ids = True
self.use_labels = True
self.gelu_activation = True
self.sinusoidal_embeddings = False
self.causal = False
self.asm = False
self.n_langs = 2
self.vocab_size = 99
self.n_special = 0
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 12
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.summary_type = "last"
self.use_proj = None
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length])
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
)
def create_and_check_flaubert_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
result = model(input_ids, langs=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_flaubert_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertWithLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_flaubert_simple_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_flaubert_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
(total_loss,) = result_with_labels.to_tuple()
result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def create_and_check_flaubert_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_flaubert_token_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_labels = self.num_labels
model = FlaubertForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_flaubert_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_choices = self.num_choices
model = FlaubertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
# Flaubert has 2 QA models -> need to manually set the correct labels for one of them here
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = FlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_flaubert_token_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
def test_flaubert_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_gpu
def test_torchscript_device_change(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config)
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
traced_model = torch.jit.trace(
model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
)
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 16,077 | 34.888393 | 117 | py |
robust-transformers | robust-transformers-main/tests/mbart/test_modeling_flax_mbart.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import MBartConfig, is_flax_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
from ..test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
shift_tokens_right,
)
def prepare_mbart_inputs_dict(
config,
input_ids,
decoder_input_ids=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
if decoder_attention_mask is None:
decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
if head_mask is None:
head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
class FlaxMBartModelTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=32,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
decoder_start_token_id=2,
initializer_range=0.02,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.decoder_start_token_id = decoder_start_token_id
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
decoder_input_ids = shift_tokens_right(input_ids, 1)
config = MBartConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
initializer_range=self.initializer_range,
use_cache=False,
)
inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def check_use_cache_forward(self, model_class_name, config, inputs_dict):
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"])
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
outputs_cache = model.decode(
decoder_input_ids[:, :-1],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
decoder_position_ids=decoder_position_ids,
)
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=outputs_cache.past_key_values,
decoder_position_ids=decoder_position_ids,
)
outputs = model.decode(decoder_input_ids, encoder_outputs)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
max_decoder_length = 20
model = model_class_name(config)
encoder_outputs = model.encode(inputs_dict["input_ids"])
decoder_input_ids, decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
],
axis=-1,
)
past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
outputs_cache = model.decode(
decoder_input_ids[:, :-1],
encoder_outputs,
decoder_attention_mask=decoder_attention_mask_cache,
past_key_values=past_key_values,
decoder_position_ids=decoder_position_ids,
)
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:],
encoder_outputs,
past_key_values=outputs_cache.past_key_values,
decoder_attention_mask=decoder_attention_mask_cache,
decoder_position_ids=decoder_position_ids,
)
outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class MBartHeadTests(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=np.int64,
)
batch_size = input_ids.shape[0]
config = MBartConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
model = FlaxMBartForSequenceClassification(config)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
expected_shape = (batch_size, config.num_labels)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_question_answering_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
model = FlaxMBartForQuestionAnswering(config)
outputs = model(input_ids=input_ids)
self.assertEqual(outputs["start_logits"].shape, input_ids.shape)
self.assertEqual(outputs["end_logits"].shape, input_ids.shape)
# @timeout_decorator.timeout(1) # not working with the decorator so far
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_model = FlaxMBartForConditionalGeneration(config)
outputs = lm_model(input_ids=input_ids)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_lm_uneven_forward(self):
config = MBartConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = FlaxMBartForConditionalGeneration(config)
context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
outputs = lm_model(input_ids=context, decoder_input_ids=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_shift_tokens_right(self):
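        # MBart's shift_tokens_right moves the last non-pad token (here the eos, id 2) to the
        # first position instead of prepending a fixed decoder start token.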
input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
shifted = shift_tokens_right(input_ids, 1)
n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxMBartModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
is_encoder_decoder = True
all_model_classes = (
(
FlaxMBartModel,
FlaxMBartForConditionalGeneration,
FlaxMBartForSequenceClassification,
FlaxMBartForQuestionAnswering,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxMBartForConditionalGeneration,) if is_flax_available() else ()
def setUp(self):
self.model_tester = FlaxMBartModelTester(self)
def test_use_cache_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
def test_use_cache_forward_with_attn_mask(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def test_encode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def encode_jitted(input_ids, attention_mask=None, **kwargs):
return model.encode(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest("JIT Enabled"):
jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_decode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
prepared_inputs_dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
return model.decode(
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
)
with self.subTest("JIT Enabled"):
jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/mbart-large-cc25", from_pt=True)
# FlaxMBartForSequenceClassification expects eos token in input_ids
input_ids = np.ones((1, 1)) * model.config.eos_token_id
outputs = model(input_ids)
self.assertIsNotNone(outputs)
@require_flax
@require_sentencepiece
@require_tokenizers
class FlaxMBartModelIntegrationTest(unittest.TestCase):
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
]
expected_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
model_name = "facebook/mbart-large-en-ro"
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def model(self):
model = FlaxMBartForConditionalGeneration.from_pretrained(self.model_name, from_pt=True)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="np")
generated_ids = self.model.generate(
model_inputs.input_ids,
attention_mask=model_inputs.attention_mask,
decoder_start_token_id=self.tokenizer.lang_code_to_id["ro_RO"],
early_stopping=True,
num_beams=2,
).sequences
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@slow
def test_batch_generation_en_ro(self):
self._assert_generated_batch_equal_expected()
| 18,947 | 39.748387 | 118 | py |
robust-transformers | robust-transformers-main/tests/mbart/test_tokenization_mbart.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import nested_simplify, require_sentencepiece, require_tokenizers, require_torch
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MBartTokenizer
rust_tokenizer_class = MBartTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
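    # The checks below use the small SentencePiece fixture; MBart ids are the raw SentencePiece
    # ids shifted by `fairseq_offset`, and out-of-vocabulary pieces map back to "<unk>".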
def test_full_tokenizer(self):
tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
# overwrite from test_tokenization_common to speed up test
def test_save_pretrained(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks that the same files are saved, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=True
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks that the same files are saved
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
# Save tokenizer rust, legacy_format=False
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
checkpoint_name = "facebook/mbart-large-en-ro"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def setUpClass(cls):
cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
)
cls.pad_token_id = 1
return cls
def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
def test_enro_tokenizer_batch_encode_plus(self):
ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_enro_tokenizer_decode_ignores_language_codes(self):
self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
self.assertEqual(result, expected_romanian)
self.assertNotIn(self.tokenizer.eos_token, result)
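    # Truncation must preserve the MBart suffix: the last two tokens stay EOS (2) followed by the
    # source language code.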
def test_enro_tokenizer_truncation(self):
src_text = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], str)
desired_max_length = 10
ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], EN_CODE)
self.assertEqual(len(ids), desired_max_length)
def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname)
new_tok = MBartTokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
batch = self.tokenizer(self.src_text, padding=True)
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(self.tgt_text, padding=True, return_tensors="pt")
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id).tolist()
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:] == [2, EN_CODE]
assert batch.decoder_input_ids[1][0] == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def test_enro_tokenizer_prepare_batch(self):
batch = self.tokenizer(
self.src_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt"
)
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(
self.tgt_text,
padding=True,
truncation=True,
max_length=len(self.expected_src_tokens),
return_tensors="pt",
)
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
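    # Source and target sides can be truncated to different maximum lengths.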
def test_seq2seq_max_length(self):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
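    # `_build_translation_inputs` appends EOS and the source language code to the input and exposes
    # the target language code as `forced_bos_token_id`.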
@require_torch
def test_tokenizer_translation(self):
inputs = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
)
self.assertEqual(
nested_simplify(inputs),
{
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
},
)
| 13,969 | 41.852761 | 300 | py |
robust-transformers | robust-transformers-main/tests/mbart/test_modeling_tf_mbart.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
config_cls = MBartConfig
config_updates = {}
hidden_act = "gelu"
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
input_ids = tf.concat([input_ids, eos_tensor], axis=1)
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.config_cls(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.pad_token_id,
**self.config_updates,
)
inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFMBartModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
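    # Reload the model from disk, stack an extra Keras Dense head on its hidden states, and make
    # sure the extended model still compiles.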
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
model_class = self.all_generative_model_classes[0]
input_ids = {
"decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
"input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
}
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
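# Builds default attention masks and head masks when they are not supplied; the decoder attention
# mask always attends to the first decoder token.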
def prepare_mbart_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
],
axis=-1,
)
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def setUp(self):
self.model_tester = TFMBartModelTester(self)
self.config_tester = ConfigTester(self, config_class=MBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
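    # After resizing the token embeddings, the new tables (and final logits bias) must have the
    # requested size while the weights that remain are left unchanged.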
def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once they are built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "weight"):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
old_final_logits_bias = model.get_bias()
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
new_final_logits_bias = model.get_bias()
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_final_logits_bias is not None and new_final_logits_bias is not None:
old_final_logits_bias = old_final_logits_bias["final_logits_bias"]
new_final_logits_bias = new_final_logits_bias["final_logits_bias"]
self.assertEqual(new_final_logits_bias.shape[0], 1)
self.assertEqual(new_final_logits_bias.shape[1], assert_size)
models_equal = True
for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()):
for p1, p2 in zip(old, new):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def test_saved_model_creation(self):
        # This test is too long (>30sec) and makes the CI fail
pass
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
    """If tensors are not close, or a and b aren't both tensors, raise a nice AssertionError."""
    if a is None and b is None:
        return True
    try:
        # tf.debugging.assert_near raises InvalidArgumentError if the tensors differ by more than `atol`
        tf.debugging.assert_near(a, b, atol=atol)
        return True
    except Exception:
        if len(prefix) > 0:
            prefix = f"{prefix}: "
        raise AssertionError(f"{prefix}{a} != {b}")
def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
]
expected_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
model_name = "facebook/mbart-large-en-ro"
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def model(self):
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
)
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
@slow
def test_batch_generation_en_ro(self):
self._assert_generated_batch_equal_expected()
| 14,316 | 40.378613 | 112 | py |
robust-transformers | robust-transformers-main/tests/mbart/test_modeling_mbart.py | # coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch MBART model. """
import copy
import tempfile
import unittest
from transformers import MBartConfig, is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
BatchEncoding,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
)
from transformers.models.mbart.modeling_mbart import MBartDecoder, MBartEncoder
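# Helper to build a full seq2seq input dict, deriving padding-based attention masks and all-ones
# head masks when they are not provided.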
def prepare_mbart_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MBartModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MBartConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
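    # Decoding with cached past_key_values must match a full forward pass over the concatenated
    # sequence (compared on a random output slice).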
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = MBartModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens to input_ids and extend the attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
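    # The encoder and decoder can be saved and reloaded as standalone models and must reproduce the
    # hidden states of the full encoder-decoder model.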
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MBartModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MBartEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MBartDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MBartModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(MBartModel, MBartForConditionalGeneration, MBartForSequenceClassification, MBartForQuestionAnswering)
if is_torch_available()
else ()
)
all_generative_model_classes = (MBartForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_missing_keys = False
def setUp(self):
self.model_tester = MBartModelTester(self)
self.config_tester = ConfigTester(self, config_class=MBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# MBartForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MBartModel, MBartForConditionalGeneration, MBartForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MBartForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
class AbstractSeq2SeqIntegrationTest(unittest.TestCase):
maxDiff = 1000 # longer string compare tracebacks
checkpoint_name = None
@classmethod
def setUpClass(cls):
cls.tokenizer = AutoTokenizer.from_pretrained(cls.checkpoint_name, use_fast=False)
return cls
@cached_property
def model(self):
"""Only load the model if needed."""
model = MBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "facebook/mbart-large-en-ro"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor face decât să înrăutăţească violenţa şi mizeria pentru milioane de oameni.',
]
expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, 250004]
@slow
def test_enro_generate_one(self):
batch: BatchEncoding = self.tokenizer(
["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt"
).to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
# self.assertEqual(self.tgt_text[1], decoded[1])
@slow
def test_enro_generate_batch(self):
batch: BatchEncoding = self.tokenizer(self.src_text, return_tensors="pt", padding=True, truncation=True).to(
torch_device
)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert self.tgt_text == decoded
def test_mbart_enro_config(self):
mbart_models = ["facebook/mbart-large-en-ro"]
expected = {"scale_embedding": True, "output_past": True}
for name in mbart_models:
config = MBartConfig.from_pretrained(name)
for k, v in expected.items():
try:
self.assertEqual(v, getattr(config, k))
except AssertionError as e:
e.args += (name, k)
raise
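    # Quick smoke test on a tiny, randomly initialized config: only the logits shape is checked.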
def test_mbart_fast_forward(self):
config = MBartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
lm_model = MBartForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(result.logits.shape, expected_shape)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartCC25IntegrationTest(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "facebook/mbart-large-cc25"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" I ate lunch twice yesterday",
]
tgt_text = ["Şeful ONU declară că nu există o soluţie militară în Siria", "to be padded"]
@unittest.skip("This test is broken, still generates english")
def test_cc25_generate(self):
inputs = self.tokenizer([self.src_text[0]], return_tensors="pt").to(torch_device)
translated_tokens = self.model.generate(
input_ids=inputs["input_ids"].to(torch_device),
decoder_start_token_id=self.tokenizer.lang_code_to_id["ro_RO"],
)
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
@slow
def test_fill_mask(self):
inputs = self.tokenizer(["One of the best <mask> I ever read!"], return_tensors="pt").to(torch_device)
outputs = self.model.generate(
inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id["en_XX"], num_beams=1
)
prediction: str = self.tokenizer.batch_decode(
outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True
)[0]
self.assertEqual(prediction, "of the best books I ever read!")
class MBartStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=4,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = MBartConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = MBartDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append the new token to input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = MBartDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class MBartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MBartDecoder, MBartForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (MBartForCausalLM,) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = MBartStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=MBartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
| 26,441 | 39.18541 | 298 | py |
robust-transformers | robust-transformers-main/tests/bigbird_pegasus/test_modeling_bigbird_pegasus.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBirdPegasus model. """
import copy
import tempfile
import unittest
from transformers import BigBirdPegasusConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
PegasusTokenizer,
)
from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import (
BigBirdPegasusDecoder,
BigBirdPegasusEncoder,
)
MODEL_ID = "google/bigbird-pegasus-large-pubmed"
def prepare_bigbird_pegasus_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
input_dict = {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
input_dict = {k: input_dict[k].to(torch_device) for k in input_dict}
return input_dict
class BigBirdPegasusModelTester:
def __init__(
self,
parent,
batch_size=7,
seq_length=256,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=31,
hidden_act="gelu_fast",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=260,
eos_token_id=1,
pad_token_id=0,
bos_token_id=2,
attention_type="block_sparse",
use_bias=False,
block_size=16,
num_random_blocks=3,
scale_embedding=True,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
self.scale_embedding = scale_embedding
def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_bigbird_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return BigBirdPegasusConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
attention_type=self.attention_type,
use_bias=self.use_bias,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
scale_embedding=self.scale_embedding,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens to input_ids and extend the attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = BigBirdPegasusEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = BigBirdPegasusDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
def create_and_check_model(self, config, inputs_dict):
model = BigBirdPegasusModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
result = model(input_ids, decoder_input_ids=decoder_input_ids, use_cache=True)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
@require_torch
class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
BigBirdPegasusModel,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BigBirdPegasusForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_missing_keys = False
test_pruning = False
test_head_masking = False
# torchscript tests are not passing for now, and torchscript support is not a priority at this stage
test_torchscript = False
# overridden from GenerationTesterMixin to avoid problems with conflicting random seeds
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.attention_type = "original_full"
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
# cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
def setUp(self):
self.model_tester = BigBirdPegasusModelTester(self)
self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
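# Saving and reloading every model class should not report any missing keys.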
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_model_various_attn_type(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["original_full", "block_sparse"]:
config_and_inputs[0].attention_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_generate_without_input_ids(self):
if self.model_tester.attention_type == "block_sparse":
# this test can never pass with BigBird block-sparse attention since the input length must be a multiple of block_size
return
super().test_generate_without_input_ids()
def test_retain_grad_hidden_states_attentions(self):
if self.model_tester.attention_type == "block_sparse":
# this test can't pass since the returned attention matrix cannot retain gradients (and is zero at many locations)
return
super().test_retain_grad_hidden_states_attentions()
# BigBirdPegasusForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (
BigBirdPegasusModel,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
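# Smoke test: generation should run without errors, in half precision when a GPU is available.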
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_dict.pop("decoder_attention_mask")
input_dict.pop("decoder_input_ids")
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(**input_dict)
model.generate(**input_dict, do_sample=True, early_stopping=False, num_return_sequences=3)
@slow
def test_batched_forward_original_full(self):
self._check_batched_forward(attn_type="original_full")
@slow
def test_batched_forward_block_sparse(self):
self._check_batched_forward(attn_type="block_sparse", tolerance=1e-1)
def _check_batched_forward(self, attn_type, tolerance=1e-3):
config, _ = self.model_tester.prepare_config_and_inputs()
config.max_position_embeddings = 128
config.block_size = 16
config.attention_type = attn_type
model = BigBirdPegasusForConditionalGeneration(config).to(torch_device)
model.eval()
chunk_length = 32
sample_with_padding = [3, 8, 11] * chunk_length + [0] * chunk_length
sample_without_padding = [4, 7, 9, 13] * chunk_length
target_ids_without_padding = [2, 3] * 8
target_ids_with_padding = [7, 8] * 6 + 4 * [-100]
attention_mask = torch.tensor(
[[1] * 3 * chunk_length + [0] * chunk_length, [1] * 4 * chunk_length],
device=torch_device,
dtype=torch.long,
)
input_ids = torch.tensor([sample_with_padding, sample_without_padding], device=torch_device, dtype=torch.long)
labels = torch.tensor(
[target_ids_without_padding, target_ids_with_padding], device=torch_device, dtype=torch.long
)
with torch.no_grad():
logits_batched = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).logits
with torch.no_grad():
logits_single_first = model(input_ids=input_ids[:1, :-chunk_length], labels=labels[:1]).logits
self.assertTrue(torch.allclose(logits_batched[0, -3:], logits_single_first[0, -3:], atol=tolerance))
with torch.no_grad():
logits_single_second = model(input_ids=input_ids[1:], labels=labels[1:, :-4]).logits
self.assertTrue(torch.allclose(logits_batched[1, :3], logits_single_second[0, :3], atol=tolerance))
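# BigBird pads inputs to a multiple of block_size internally, so explicitly
# zero-padding the input (with a matching attention mask) should yield the same logits.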
def test_auto_padding(self):
ids = [[7, 6, 9] * 65]
config, _ = self.model_tester.prepare_config_and_inputs()
input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
attention_mask = input_ids.new_ones(input_ids.shape)
decoder_input_ids = torch.tensor([[33, 5, 8] * 3], device=torch_device, dtype=torch.long)
config.block_size = 8
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
output1 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
"logits"
]
ids = [[7, 6, 9] * 65 + [0] * 5]
input_ids = torch.tensor(ids, device=torch_device, dtype=torch.long)
attention_mask = torch.tensor([[1] * 3 * 65 + [0] * 5], device=torch_device, dtype=torch.long)
output2 = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids)[
"logits"
]
self.assertTrue(torch.allclose(output1, output2, atol=1e-5))
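# When the sequence is too short for block-sparse attention, the model falls back to
# full attention, so both attention types should give identical logits for the same weights.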
def test_for_change_to_full_attn(self):
self.model_tester.seq_length = 9
config, input_dict = self.model_tester.prepare_config_and_inputs()
# the model will automatically switch to full attention since seq_length is too short for block-sparse attention
config.attention_type = "block_sparse"
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
state_dict = model.state_dict()
outputs1 = model(**input_dict)["logits"]
config.attention_type = "original_full"
model = BigBirdPegasusForConditionalGeneration(config).eval().to(torch_device)
model.load_state_dict(state_dict)
outputs2 = model(**input_dict)["logits"]
self.assertTrue(torch.allclose(outputs1, outputs2, atol=1e-5))
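# Integration tests below run the pretrained BigBirdPegasus checkpoint and are marked slow.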
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class BigBirdPegasusModelIntegrationTests(unittest.TestCase):
def _get_dummy_input_ids(self):
# fmt: off
ids = torch.tensor(
[[685, 560, 630, 193, 836, 764, 708, 360, 10, 724, 278, 755, 805, 600, 71, 473, 601, 397, 315, 706, 487, 552, 88, 175, 601, 850, 678, 538, 846, 73, 778, 917, 116, 977, 756, 710, 1023, 848, 432, 449, 851, 100, 985, 178, 756, 798, 660, 148, 911, 424, 289, 962, 266, 698, 640, 545, 544, 715, 245, 152, 676, 511, 460, 883, 184, 29, 803, 129, 129, 933, 54, 902, 551, 489, 757, 274, 336, 389, 618, 43, 443, 544, 889, 258, 322, 1000, 938, 58, 292, 871, 120, 780, 431, 83, 92, 897, 399, 612, 566, 909, 634, 939, 85, 204, 325, 775, 965, 48, 640, 1013, 132, 973, 869, 181, 1001, 847, 144, 661, 228, 955, 792, 720, 910, 374, 854, 561, 306, 582, 170, 676, 449, 96, 198, 607, 257, 882, 691, 293, 931, 817, 862, 388, 611, 555, 974, 369, 1000, 918, 202, 384, 513, 907, 371, 556, 955, 384, 24, 700, 131, 378, 99, 575, 932, 735, 124, 964, 595, 943, 740, 149, 210, 563, 412, 783, 42, 59, 706, 37, 779, 87, 44, 873, 12, 771, 308, 81, 33, 183, 129, 807, 276, 175, 555, 372, 185, 445, 489, 590, 287, 281, 638, 771, 516, 95, 227, 876, 270, 881, 297, 329, 20, 608, 841, 411, 451, 249, 181, 324, 1005, 830, 783, 865, 261, 964, 750, 140, 1021, 599, 462, 890, 622, 844, 697, 529, 153, 926, 150, 111, 26, 465, 957, 890, 887, 118, 446, 596, 674, 873, 929, 229, 508, 764, 122, 327, 470, 288, 526, 840, 697, 153, 592, 42, 275, 553, 439, 208, 780, 167, 112, 350, 1018, 130, 736, 887, 813, 217, 382, 25, 68, 979, 1008, 772, 235, 717, 999, 292, 727, 1023, 702, 710, 728, 556, 33, 12, 617, 213, 139, 695, 1004, 422, 638, 669, 624, 489, 771, 540, 980, 218, 664, 822, 308, 175, 149, 950, 542, 580, 548, 808, 394, 74, 298, 920, 900, 815, 731, 947, 877, 772, 800, 778, 395, 540, 430, 200, 424, 62, 342, 866, 45, 803, 931, 89, 34, 646, 233, 768, 37, 769, 460, 291, 198, 895, 950, 255, 81, 447, 137, 190, 130, 210, 369, 292, 377, 348, 169, 885, 805, 177, 538, 324, 872, 509, 804, 115, 799, 30, 754, 290, 147, 274, 222, 341, 510, 515, 70, 358, 909, 557, 886, 766, 323, 624, 92, 342, 424, 552, 972, 663, 415, 658, 711, 968, 275, 861, 44, 84, 434, 810, 94, 175, 406, 202, 858, 499, 481, 988, 330, 541, 1004, 210, 618, 955, 897, 983, 576, 17, 107, 165, 607, 537, 629, 192, 196, 308, 137, 953, 860, 94, 892, 751, 88, 161, 148, 585, 456, 88, 14, 315, 594, 121, 885, 952, 833, 716, 733, 933, 282, 801, 427, 783, 471, 285, 277, 979, 325, 535, 228, 891, 596, 648, 969, 574, 654, 518, 257, 137, 208, 464, 950, 140, 5, 424, 349, 942, 283, 587, 821, 1007, 434, 220, 820, 740, 874, 787, 374, 291, 564, 671, 438, 827, 940, 824, 509, 1021, 787, 942, 856, 450, 327, 491, 54, 817, 95, 60, 337, 667, 637, 164, 571, 946, 107, 202, 301, 782, 890, 839, 551, 680, 649, 14, 1017, 904, 721, 1017, 535, 505, 848, 986, 777, 740, 775, 210, 456, 469, 474, 963, 573, 401, 57, 883, 750, 664, 281, 5, 613, 1005, 306, 344, 543, 567, 154, 789, 354, 358, 698, 408, 412, 30, 930, 372, 822, 632, 948, 855, 503, 8, 618, 1010, 138, 695, 897, 852, 377, 933, 722, 149, 886, 1009, 260, 127, 811, 578, 533, 805, 325, 977, 113, 944, 651, 238, 361, 991, 860, 556, 64, 928, 917, 455, 266, 445, 604, 624, 420, 340, 845, 275, 370, 843, 227, 226, 940, 644, 909, 229, 827, 898, 370, 129, 808, 25, 699, 293, 356, 838, 135, 4, 227, 890, 681, 445, 418, 285, 837, 27, 737, 249, 366, 948, 202, 438, 198, 930, 648, 638, 607, 73, 247, 853, 136, 708, 214, 476, 621, 324, 103, 853, 328, 596, 224, 257, 646, 348, 108, 927, 970, 980, 520, 150, 998, 477, 393, 684, 559, 1, 361, 692, 551, 90, 75, 500, 739, 636, 344, 97, 852, 283, 719, 33, 116, 455, 866, 429, 828, 826, 691, 174, 746, 133, 442, 94, 348, 402, 420, 707, 405, 942, 186, 976, 376, 677, 874, 
703, 517, 498, 499, 206, 415, 366, 856, 739, 420, 586, 219, 952, 539, 375, 23, 461, 720, 355, 603, 52, 999, 815, 721, 574, 445, 816, 1019, 105, 641, 395, 972, 910, 328, 607, 519, 686, 246, 415, 528, 170, 167, 310, 940, 595, 392, 221, 834, 682, 835, 115, 861, 335, 742, 220, 247, 101, 416, 222, 179, 509, 175, 606, 627, 674, 781, 737, 746, 849, 67, 457, 1012, 126, 139, 625, 731, 156, 697, 121, 322, 449, 710, 857, 291, 976, 4, 701, 239, 678, 172, 724, 857, 583, 661, 903, 797, 628, 903, 835, 605, 989, 615, 870, 380, 710, 110, 330, 101, 695, 846, 918, 508, 672, 594, 36, 238, 244, 251, 393, 767, 282, 22, 430, 230, 983, 401, 154, 1007, 120, 678, 896, 386, 390, 711, 397, 347, 587, 1020, 951, 79, 831, 585, 200, 814, 134, 560, 700, 171, 452, 139, 755, 314, 476, 346, 388, 126, 719, 851, 198, 699, 901, 18, 710, 448, 351, 665, 644, 326, 425, 165, 571, 178, 440, 665, 674, 915, 866, 463, 754, 136, 950, 748, 47, 497, 1013, 640, 930, 338, 158, 525, 631, 815, 887, 289, 803, 116, 600, 637, 410, 175, 499, 876, 565, 1002, 623, 577, 333, 887, 586, 147, 773, 776, 644, 49, 77, 294, 117, 494, 561, 110, 979, 180, 562, 72, 859, 434, 1007, 286, 516, 75, 597, 491, 322, 888, 533, 209, 43, 499, 29, 411, 856, 181, 305, 963, 615, 778, 259, 373, 877, 746, 858, 381, 886, 613, 91, 69, 618, 523, 13, 617, 226, 422, 168, 929, 379, 290, 923, 100, 218, 307, 345, 211, 789, 735, 669, 585, 275, 410, 921, 552, 235, 636, 285, 665, 659, 708, 173, 724, 302, 823, 1, 139, 708, 903, 732, 868, 442, 967, 916, 163, 51, 243, 871]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def _get_dummy_target_ids(self):
# fmt: off
ids = torch.tensor(
[[13, 6, 1, 4, 12, 4, 8, 10, 4, 6, 3, 5, 8, 7, 9, 9]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
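# Compares a slice of the logits computed with block-sparse attention against reference values.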
def test_inference_block_sparse(self):
model = BigBirdPegasusForConditionalGeneration.from_pretrained(
MODEL_ID, attention_type="block_sparse", block_size=16, num_random_blocks=3
)
model.to(torch_device)
input_ids = self._get_dummy_input_ids()
target_ids = self._get_dummy_target_ids()
outputs = model(input_ids, labels=target_ids)
prediction_logits = outputs.logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103)))
# fmt: off
expected_prediction_logits_slice = torch.tensor(
[[1.7769, 5.8479, 6.2375, 2.2745, 8.6157, 4.7483, 5.0647, 6.5358, 2.3393, 7.8333, 3.8403, 0.0255, 7.219, 5.2759, 3.097, 6.387, 4.9341, 7.1409, 5.1179, 0.1144, 6.8268, 0.7598, 0.6258, 2.373, 0.4627, -1.9919, 1.8422, 3.4578], [1.8026, 5.9604, 5.954, 2.8642, 9.0608, 4.394, 5.3779, 7.0216, 1.543, 7.8744, 4.4231, -0.0398, 7.6091, 5.6611, 3.3536, 6.8624, 4.7699, 6.5241, 4.8893, 0.5791, 6.8368, 0.1034, 0.0338, 2.9393, 0.5034, -2.5509, 2.0172, 3.2858], [1.8426, 5.9151, 5.5374, 3.0426, 9.1762, 3.6287, 5.3916, 7.4621, 1.2582, 7.9244, 4.694, -0.1308, 7.4725, 5.5385, 3.4598, 7.0422, 4.2455, 5.797, 4.5927, 0.7478, 6.7467, -0.2695, -0.3207, 3.0269, 0.4714, -2.8134, 2.0406, 3.1089], [1.6527, 5.8416, 5.4558, 3.0044, 9.3478, 3.2607, 5.3887, 7.52, 0.9362, 7.8877, 4.8465, -0.1705, 7.3932, 5.6352, 3.5744, 7.2623, 4.0485, 5.2788, 4.5859, 0.8325, 6.6088, -0.3676, -0.6287, 3.1731, 0.4483, -3.1573, 2.0522, 2.8868]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertTrue(
torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
)
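# Same check as above, but with full (original) attention.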
def test_inference_full_attn(self):
model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID, attention_type="original_full")
model.to(torch_device)
input_ids = self._get_dummy_input_ids()
target_ids = self._get_dummy_target_ids()
outputs = model(input_ids, labels=target_ids)
prediction_logits = outputs.logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103)))
# fmt: off
expected_prediction_logits_slice = torch.tensor(
[[1.3418, 5.8304, 6.5662, 2.0448, 8.7702, 4.6579, 4.9947, 6.429, 2.4296, 7.9431, 4.217, 0.0672, 7.334, 5.1966, 2.9603, 6.0814, 4.6756, 7.5522, 5.076, 0.213, 6.6638, 0.6577, 0.244, 2.1221, 0.7531, -2.4076, 1.8731, 3.5594], [1.5525, 6.0524, 6.309, 2.6245, 9.229, 4.5213, 5.0913, 7.0622, 1.7992, 8.0962, 4.7994, -0.0248, 7.7168, 5.5878, 3.0883, 6.5248, 4.7895, 6.9974, 4.8787, 0.5445, 6.6686, 0.0102, -0.1659, 2.6195, 0.7389, -2.8956, 1.9928, 3.3777], [1.6407, 6.2104, 6.0331, 2.8076, 9.4074, 3.9772, 5.0574, 7.5316, 1.4201, 8.3035, 5.0212, -0.1031, 7.553, 5.5023, 3.1427, 6.7674, 4.4409, 6.457, 4.525, 0.728, 6.5422, -0.6234, -0.4726, 2.7486, 0.6985, -3.0804, 1.9669, 3.2365], [1.5065, 6.1271, 5.8296, 2.8405, 9.5649, 3.6834, 5.1214, 7.546, 0.9758, 8.3335, 5.1952, -0.1395, 7.4348, 5.6893, 3.2942, 7.0356, 4.1665, 5.9695, 4.3898, 0.8931, 6.3988, -0.8957, -0.7522, 2.8924, 0.6498, -3.4358, 1.8654, 2.9735]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertTrue(
torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
)
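# End-to-end generation test on two long scientific articles using the pretrained arXiv checkpoint.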
def test_seq_to_seq_generation(self):
MODEL_ID = "google/bigbird-pegasus-large-arxiv"
model = BigBirdPegasusForConditionalGeneration.from_pretrained(MODEL_ID).to(torch_device)
tokenizer = PegasusTokenizer.from_pretrained(MODEL_ID)
ARTICLE_LEP = r"""the lep experiments at the resonance of @xmath1-boson have tested the standard model ( sm ) at quantum level , measuring the @xmath1-decay into fermion pairs with an accuracy of one part in ten thousands . the good agreement of the lep data with the sm predictions have severely constrained the behavior of new physics at the @xmath1-pole . taking these achievements into account one can imagine that the physics of @xmath1-boson will again play the central role in the frontier of particle physics if the next generation @xmath1 factory comes true with the generated @xmath1 events several orders of magnitude higher than that of the lep . this factory can be realized in the gigaz option of the international linear collider ( ilc)@xcite . the ilc is a proposed electron - positron collider with tunable energy ranging from @xmath12 to @xmath13 and polarized beams in its first phase , and the gigaz option corresponds to its operation on top of the resonance of @xmath1 boson by adding a bypass to its main beam line . given the high luminosity , @xmath14 , and the cross section at the resonance of @xmath1 boson , @xmath15 , about @xmath16 @xmath1 events can be generated in an operational year of @xmath17 of gigaz , which implies that the expected sensitivity to the branching ratio of @xmath1-decay can be improved from @xmath18 at the lep to @xmath19 at the gigaz@xcite . in light of this , the @xmath1-boson properties , especially its exotic or rare decays which are widely believed to be sensitive to new physics , should be investigated comprehensively to evaluate their potential in probing new physics . among the rare @xmath1-decays , the flavor changing ( fc ) processes were most extensively studied to explore the flavor texture in new physics @xcite , and it was found that , although these processes are severely suppressed in the sm , their branching ratios in new physics models can be greatly enhanced to @xmath19 for lepton flavor violation decays @xcite and @xmath20 for quark flavor violation decays @xcite . besides the fc processes , the @xmath1-decay into light higgs boson(s ) is another type of rare process that was widely studied , e.g. the decay @xmath21 ( @xmath22 ) with the particle @xmath0 denoting a light higgs boson was studied in @xcite , the decay @xmath23 was studied in the two higgs doublet model ( 2hdm)@xcite and the minimal supersymmetric standard model ( mssm)@xcite , and the decay @xmath4 was studied in a model independent way @xcite , in 2hdm@xcite and also in mssm@xcite . these studies indicate that , in contrast with the kinematic forbidden of these decays in the sm , the rates of these decays can be as large as @xmath18 in new physics models , which lie within the expected sensitivity of the gigaz . in this work , we extend the previous studies of these decays to some new models and investigate these decays altogether . we are motivated by some recent studies on the singlet extension of the mssm , such as the next - to - minimal supersymmetric standard model ( nmssm ) @xcite and the nearly minimal supersymmetric standard model ( nmssm ) @xcite , where a light cp - odd higgs boson @xmath0 with singlet - dominant component may naturally arise from the spontaneous breaking of some approximate global symmetry like @xmath24 or peccei - quuin symmetry @xcite . these non - minimal supersymmetric models can not only avoid the @xmath25-problem , but also alleviate the little hierarchy by having such a light higgs boson @xmath0 @xcite . 
we are also motivated by that , with the latest experiments , the properties of the light higgs boson are more stringently constrained than before . so it is worth updating the previous studies . so far there is no model - independent lower bound on the lightest higgs boson mass . in the sm , it must be heavier than @xmath26 gev , obtained from the null observation of the higgs boson at lep experiments . however , due to the more complex structure of the higgs sector in the extensions of the sm , this lower bound can be significantly relaxed according to recent studies , e.g. , for the cp - odd higgs boson @xmath0 we have @xmath27 gev in the nmssm @xcite , @xmath28 gev in the nmssm @xcite , and @xmath29 gev in the lepton - specific 2hdm ( l2hdm ) @xcite . with such a light cp - odd higgs boson , the z - decay into one or more @xmath0 is open up . noting that the decay @xmath30 is forbidden due to bose symmetry , we in this work study the rare @xmath1-decays @xmath6 ( @xmath22 ) , @xmath31 and @xmath4 in a comparative way for four models , namely the type - ii 2hdm@xcite , the l2hdm @xcite , the nmssm and the nmssm . in our study , we examine carefully the constraints on the light @xmath0 from many latest experimental results . this work is organized as follows . in sec . ii we briefly describe the four new physics models . in sec . iii we present the calculations of the rare @xmath1-decays . in sec . iv we list the constraints on the four new physics models . in sec . v we show the numerical results for the branching ratios of the rare @xmath1-decays in various models . finally , the conclusion is given in sec . as the most economical way , the sm utilizes one higgs doublet to break the electroweak symmetry . as a result , the sm predicts only one physical higgs boson with its properties totally determined by two free parameters . in new physics models , the higgs sector is usually extended by adding higgs doublets and/or singlets , and consequently , more physical higgs bosons are predicted along with more free parameters involved in . the general 2hdm contains two @xmath32 doublet higgs fields @xmath33 and @xmath34 , and with the assumption of cp - conserving , its scalar potential can be parameterized as@xcite : @xmath35,\end{aligned}\ ] ] where @xmath36 ( @xmath37 ) are free dimensionless parameters , and @xmath38 ( @xmath39 ) are the parameters with mass dimension . after the electroweak symmetry breaking , the spectrum of this higgs sector includes three massless goldstone modes , which become the longitudinal modes of @xmath40 and @xmath1 bosons , and five massive physical states : two cp - even higgs bosons @xmath41 and @xmath42 , one neutral cp - odd higgs particle @xmath0 and a pair of charged higgs bosons @xmath43 . noting the constraint @xmath44 with @xmath45 and @xmath46 denoting the vacuum expectation values ( vev ) of @xmath33 and @xmath34 respectively , we choose @xmath47 as the input parameters with @xmath48 , and @xmath49 being the mixing angle that diagonalizes the mass matrix of the cp - even higgs fields . the difference between the type - ii 2hdm and the l2hdm comes from the yukawa coupling of the higgs bosons to quark / lepton . in the type - ii 2hdm , one higgs doublet @xmath34 generates the masses of up - type quarks and the other doublet @xmath33 generates the masses of down - type quarks and charged leptons ; while in the l2hdm one higgs doublet @xmath33 couples only to leptons and the other doublet @xmath34 couples only to quarks . 
so the yukawa interactions of @xmath0 to fermions in these two models are given by @xcite @xmath50 with @xmath51 denoting generation index . obviously , in the type - ii 2hdm the @xmath52 coupling and the @xmath53 coupling can be simultaneously enhanced by @xmath54 , while in the l2hdm only the @xmath53 coupling is enhanced by @xmath55 . the structures of the nmssm and the nmssm are described by their superpotentials and corresponding soft - breaking terms , which are given by @xcite @xmath56 where @xmath57 is the superpotential of the mssm without the @xmath25 term , @xmath58 and @xmath59 are higgs doublet and singlet superfields with @xmath60 and @xmath61 being their scalar component respectively , @xmath62 , @xmath63 , @xmath64 , @xmath65 , @xmath66 and @xmath67 are soft breaking parameters , and @xmath68 and @xmath69 are coefficients of the higgs self interactions . with the superpotentials and the soft - breaking terms , one can get the higgs potentials of the nmssm and the nmssm respectively . like the 2hdm , the higgs bosons with same cp property will mix and the mass eigenstates are obtained by diagonalizing the corresponding mass matrices : @xmath70 where the fields on the right hands of the equations are component fields of @xmath71 , @xmath72 and @xmath61 defined by @xmath73 @xmath74 and @xmath75 are respectively the cp - even and cp - odd neutral higgs bosons , @xmath76 and @xmath77 are goldstone bosons eaten by @xmath1 and @xmath78 , and @xmath79 is the charged higgs boson . so both the nmssm and nmssm predict three cp - even higgs bosons , two cp - odd higgs bosons and one pair of charged higgs bosons . in general , the lighter cp - odd higgs @xmath0 in these model is the mixture of the singlet field @xmath80 and the doublet field combination , @xmath81 , i.e. @xmath82 and its couplings to down - type quarks are then proportional to @xmath83 . so for singlet dominated @xmath0 , @xmath84 is small and the couplings are suppressed . as a comparison , the interactions of @xmath0 with the squarks are given by@xcite @xmath85 i.e. the interaction does not vanish when @xmath86 approaches zero . just like the 2hdm where we use the vevs of the higgs fields as fundamental parameters , we choose @xmath68 , @xmath69 , @xmath87 , @xmath88 , @xmath66 and @xmath89 as input parameters for the nmssm@xcite and @xmath68 , @xmath54 , @xmath88 , @xmath65 , @xmath90 and @xmath91 as input parameters for the nmssm@xcite . about the nmssm and the nmssm , three points should be noted . the first is for the two models , there is no explicit @xmath92term , and the effective @xmath25 parameter ( @xmath93 ) is generated when the scalar component of @xmath59 develops a vev . the second is , the nmssm is actually same as the nmssm with @xmath94@xcite , because the tadpole terms @xmath95 and its soft breaking term @xmath96 in the nmssm do not induce any interactions , except for the tree - level higgs boson masses and the minimization conditions . and the last is despite of the similarities , the nmssm has its own peculiarity , which comes from its neutralino sector . in the basis @xmath97 , its neutralino mass matrix is given by @xcite @xmath98 where @xmath99 and @xmath100 are @xmath101 and @xmath102 gaugino masses respectively , @xmath103 , @xmath104 , @xmath105 and @xmath106 . 
after diagonalizing this matrix one can get the mass eigenstate of the lightest neutralino @xmath107 with mass taking the following form @xcite @xmath108 this expression implies that @xmath107 must be lighter than about @xmath109 gev for @xmath110 ( from lower bound on chargnio mass ) and @xmath111 ( perturbativity bound ) . like the other supersymmetric models , @xmath107 as the lightest sparticle acts as the dark matter in the universe , but due to its singlino - dominated nature , it is difficult to annihilate sufficiently to get the correct density in the current universe . so the relic density of @xmath107 plays a crucial way in selecting the model parameters . for example , as shown in @xcite , for @xmath112 , there is no way to get the correct relic density , and for the other cases , @xmath107 mainly annihilates by exchanging @xmath1 boson for @xmath113 , or by exchanging a light cp - odd higgs boson @xmath0 with mass satisfying the relation @xmath114 for @xmath115 . for the annihilation , @xmath54 and @xmath25 are required to be less than 10 and @xmath116 respectively because through eq.([mass - exp ] ) a large @xmath87 or @xmath25 will suppress @xmath117 to make the annihilation more difficult . the properties of the lightest cp - odd higgs boson @xmath0 , such as its mass and couplings , are also limited tightly since @xmath0 plays an important role in @xmath107 annihilation . the phenomenology of the nmssm is also rather special , and this was discussed in detail in @xcite . in the type - ii 2hdm , l2hdm , nmssm and nmssm , the rare @xmath1-decays @xmath118 ( @xmath22 ) , @xmath3 and @xmath4 may proceed by the feynman diagrams shown in fig.[fig1 ] , fig.[fig2 ] and fig.[fig3 ] respectively . for these diagrams , the intermediate state @xmath119 represents all possible cp - even higgs bosons in the corresponding model , i.e. @xmath41 and @xmath42 in type - ii 2hdm and l2hdm and @xmath41 , @xmath42 and @xmath120 in nmssm and nmssm . in order to take into account the possible resonance effects of @xmath119 in fig.[fig1](c ) for @xmath2 and fig.[fig3 ] ( a ) for @xmath11 , we have calculated all the decay modes of @xmath119 and properly included the width effect in its propagator . as to the decay @xmath121 , two points should be noted . one is , unlike the decays @xmath6 and @xmath11 , this process proceeds only through loops mediated by quarks / leptons in the type - ii 2hdm and l2hdm , and additionally by sparticles in the nmssm and nmssm . so in most cases its rate should be much smaller than the other two . the other is due to cp - invariance , loops mediated by squarks / sleptons give no contribution to the decay@xcite . in actual calculation , this is reflected by the fact that the coupling coefficient of @xmath122 differs from that of @xmath123 by a minus sign ( see eq.([asqsq ] ) ) , and as a result , the squark - mediated contributions to @xmath121 are completely canceled out . with regard to the rare decay @xmath11 , we have more explanations . in the lowest order , this decay proceeds by the diagram shown in fig.[fig3 ] ( a ) , and hence one may think that , as a rough estimate , it is enough to only consider the contributions from fig.[fig3](a ) . however , we note that in some cases of the type - ii 2hdm and l2hdm , due to the cancelation of the contributions from different @xmath119 in fig.[fig3 ] ( a ) and also due to the potentially largeness of @xmath124 couplings ( i.e. 
larger than the electroweak scale @xmath125 ) , the radiative correction from the higgs - mediated loops may dominate over the tree level contribution even when the tree level prediction of the rate , @xmath126 , exceeds @xmath20 . on the other hand , we find the contribution from quark / lepton - mediated loops can be safely neglected if @xmath127 in the type - ii 2hdm and the l2hdm . in the nmssm and the nmssm , besides the corrections from the higgs- and quark / lepton - mediated loops , loops involving sparticles such as squarks , charginos and neutralinos can also contribute to the decay . we numerically checked that the contributions from squarks and charginos can be safely neglected if @xmath127 . we also calculated part of potentially large neutralino correction ( note that there are totally about @xmath128 diagrams for such correction ! ) and found they can be neglected too . since considering all the radiative corrections will make our numerical calculation rather slow , we only include the most important correction , namely that from higgs - mediated loops , in presenting our results for the four models . one can intuitively understand the relative smallness of the sparticle contribution to @xmath11 as follows . first consider the squark contribution which is induced by the @xmath129 interaction ( @xmath130 denotes the squark in chirality state ) and the @xmath131 interaction through box diagrams . because the @xmath132 interaction conserves the chirality of the squarks while the @xmath133 interaction violates the chirality , to get non - zero contribution to @xmath11 from the squark loops , at least four chiral flippings are needed , with three of them provided by @xmath131 interaction and the rest provided by the left - right squark mixing . this means that , if one calculates the amplitude in the chirality basis with the mass insertion method , the amplitude is suppressed by the mixing factor @xmath134 with @xmath135 being the off diagonal element in squark mass matrix . next consider the chargino / neutralino contributions . since for a light @xmath0 , its doublet component , parameterized by @xmath84 in eq.([mixing ] ) , is usually small , the couplings of @xmath0 with the sparticles will never be tremendously large@xcite . so the chargino / neutralino contributions are not important too . in our calculation of the decays , we work in the mass eigenstates of sparticles instead of in the chirality basis . for the type - ii 2hdm and the l2hdm , we consider the following constraints @xcite : * theoretical constraints on @xmath136 from perturbativity , unitarity and requirements that the scalar potential is finit at large field values and contains no flat directions @xcite , which imply that @xmath137 * the constraints from the lep search for neutral higgs bosons . we compute the signals from the higgs - strahlung production @xmath138 ( @xmath139 ) with @xmath140 @xcite and from the associated production @xmath141 with @xmath142 @xcite , and compare them with the corresponding lep data which have been inputted into our code . we also consider the constraints from @xmath138 by looking for a peak of @xmath143 recoil mass distribution of @xmath1-boson @xcite and the constraint of @xmath144 mev when @xmath145 @xcite . + these constraints limit the quantities such as @xmath146 \times br ( h_i \to \bar{b } b ) $ ] on the @xmath147 plane with the the subscript @xmath148 denoting the coupling coefficient of the @xmath149 interaction . 
they also impose a model - dependent lower bound on @xmath150 , e.g. , @xmath151 for the type - ii 2hdm ( from our scan results ) , @xmath152 for the l2hdm@xcite , and @xmath153 for the nmssm @xcite . these bounds are significantly lower than that of the sm , i.e. @xmath154 , partially because in new physics models , unconventional decay modes of @xmath155 such as @xmath156 are open up . as to the nmssm , another specific reason for allowing a significantly lighter cp - even higgs boson is that the boson may be singlet - dominated in this model . + with regard to the lightest cp - odd higgs boson @xmath0 , we checked that there is no lower bound on its mass so long as the @xmath157 interaction is weak or @xmath155 is sufficiently heavy . * the constraints from the lep search for a light higgs boson via the yukawa process @xmath158 with @xmath22 and @xmath61 denoting a scalar @xcite . these constraints can limit the @xmath159 coupling versus @xmath160 in new physics models . * the constraints from the cleo - iii limit on @xmath161 and the latest babar limits on @xmath162 . these constraints will put very tight constraints on the @xmath163 coupling for @xmath164 . in our analysis , we use the results of fig.8 in the second paper of @xcite to excluded the unfavored points . * the constraints from @xmath165 couplings . since the higgs sector can give sizable higher order corrections to @xmath165 couplings , we calculate them to one loop level and require the corrected @xmath165 couplings to lie within the @xmath166 range of their fitted value . the sm predictions for the couplings at @xmath1-pole are given by @xmath167 and @xmath168 @xcite , and the fitted values are given by @xmath169 and @xmath170 , respectively@xcite . we adopt the formula in @xcite to the 2hdm in our calculation . * the constraints from @xmath171 leptonic decay . we require the new physics correction to the branching ratio @xmath172 to be in the range of @xmath173 @xcite . we use the formula in @xcite in our calculation . + about the constraints ( 5 ) and ( 6 ) , two points should be noted . one is all higgs bosons are involved in the constraints by entering the self energy of @xmath171 lepton , the @xmath174 vertex correction or the @xmath175 vertex correction , and also the box diagrams for @xmath176@xcite . since the yukawa couplings of the higgs bosons to @xmath171 lepton get enhanced by @xmath54 and so do the corrections , @xmath54 must be upper bounded for given spectrum of the higgs sector . generally speaking , the lighter @xmath0 is , the more tightly @xmath54 is limited@xcite . the other point is in the type - ii 2hdm , @xmath177 , b - physics observables as well as @xmath178 decays discussed above can constraint the model in a tighter way than the constraints ( 5 ) and ( 6 ) since the yukawa couplings of @xmath171 lepton and @xmath179 quark are simultaneously enhanced by @xmath54 . but for the l2hdm , because only the yukawa couplings of @xmath171 lepton get enhanced ( see eq.[yukawa ] ) , the constraints ( 5 ) and ( 6 ) are more important in limiting @xmath54 . * indirect constraints from the precision electroweak observables such as @xmath180 , @xmath181 and @xmath182 , or their combinations @xmath183 @xcite . we require @xmath184 to be compatible with the lep / sld data at @xmath185 confidence level@xcite . we also require new physics prediction of @xmath186 is within the @xmath187 range of its experimental value . 
the latest results for @xmath188 are @xmath189 ( measured value ) and @xmath190 ( sm prediction ) for @xmath191 gev @xcite . in our code , we adopt the formula for these observables presented in @xcite to the type - ii 2hdm and the l2hdm respectively . + in calculating @xmath180 , @xmath181 and @xmath182 , we note that these observables get dominant contributions from the self energies of the gauge bosons @xmath1 , @xmath192 and @xmath193 . since there is no @xmath194 coupling or @xmath195 coupling , @xmath0 must be associated with the other higgs bosons to contribute to the self energies . so by the uv convergence of these quantities , one can infer that , for the case of a light @xmath0 and @xmath196 , these quantities depend on the spectrum of the higgs sector in a way like @xmath197 at leading order , which implies that a light @xmath0 can still survive the constraints from the precision electroweak observables given the splitting between @xmath150 and @xmath198 is moderate@xcite . * the constraints from b physics observables such as the branching ratios for @xmath199 , @xmath200 and @xmath201 , and the mass differences @xmath202 and @xmath203 . we require their theoretical predications to agree with the corresponding experimental values at @xmath187 level . + in the type - ii 2hdm and the l2hdm , only the charged higgs boson contributes to these observables by loops , so one can expect that @xmath198 versus @xmath54 is to be limited . combined analysis of the limits in the type - ii 2hdm has been done by the ckmfitter group , and the lower bound of @xmath204 as a function of @xmath87 was given in fig.11 of @xcite . this analysis indicates that @xmath198 must be heavier than @xmath205 at @xmath185 c.l . regardless the value of @xmath54 . in this work , we use the results of fig.11 in @xcite to exclude the unfavored points . as for the l2hdm , b physics actually can not put any constraints@xcite because in this model the couplings of the charged higgs boson to quarks are proportional to @xmath206 and in the case of large @xmath54 which we are interested in , they are suppressed . in our analysis of the l2hdm , we impose the lep bound on @xmath198 , i.e. @xmath207@xcite . * the constraints from the muon anomalous magnetic moment @xmath208 . now both the theoretical prediction and the experimental measured value of @xmath208 have reached a remarkable precision , but a significant deviation still exists : @xmath209 @xcite . in the 2hdm , @xmath208 gets additional contributions from the one - loop diagrams induced by the higgs bosons and also from the two - loop barr - zee diagrams mediated by @xmath0 and @xmath155@xcite . if the higgs bosons are much heavier than @xmath25 lepton mass , the contributions from the barr - zee diagrams are more important , and to efficiently alleviate the discrepancy of @xmath208 , one needs a light @xmath0 along with its enhanced couplings to @xmath25 lepton and also to heavy fermions such as bottom quark and @xmath171 lepton to push up the effects of the barr - zee diagram@xcite . the cp - even higgs bosons are usually preferred to be heavy since their contributions to @xmath208 are negative . + in the type - ii 2hdm , because @xmath54 is tightly constrained by the process @xmath210 at the lep@xcite and the @xmath178 decay@xcite , the barr - zee diagram contribution is insufficient to enhance @xmath208 to @xmath187 range around its measured value@xcite . so in our analysis , we require the type - ii 2hdm to explain @xmath208 at @xmath211 level . 
while for the l2hdm , @xmath54 is less constrained compared with the type - ii 2hdm , and the barr - zee diagram involving the @xmath171-loop is capable to push up greatly the theoretical prediction of @xmath208@xcite . therefore , we require the l2hdm to explain the discrepancy at @xmath187 level . + unlike the other constraints discussed above , the @xmath208 constraint will put a two - sided bound on @xmath54 since on the one hand , it needs a large @xmath54 to enhance the barr - zee contribution , but on the other hand , too large @xmath54 will result in an unacceptable large @xmath208 . * since this paper concentrates on a light @xmath0 , the decay @xmath212 is open up with a possible large decay width . we require the width of any higgs boson to be smaller than its mass to avoid a too fat higgs boson@xcite . we checked that for the scenario characterized by @xmath213 , the coefficient of @xmath214 interaction is usually larger than the electroweak scale @xmath125 , and consequently a large decay width is resulted . for the nmssm and nmssm , the above constraints become more complicated because in these models , not only more higgs bosons are involved in , but also sparticles enter the constraints . so it is not easy to understand some of the constraints intuitively . take the process @xmath199 as an example . in the supersymmetric models , besides the charged higgs contribution , chargino loops , gluino loops as well as neutralino loops also contribute to the process@xcite , and depending on the susy parameters , any of these contributions may become dominated over or be canceled by other contributions . as a result , although the charged higgs affects the process in the same way as that in the type - ii 2hdm , charged higgs as light as @xmath215 is still allowed even for @xmath216@xcite . since among the constraints , @xmath208 is rather peculiar in that it needs new physics to explain the discrepancy between @xmath217 and @xmath218 , we discuss more about its dependence on susy parameters . in the nmssm and the nmssm , @xmath208 receives contributions from higgs loops and neutralino / chargino loops . for the higgs contribution , it is quite similar to that of the type - ii 2hdm except that more higgs bosons are involved in@xcite . for the neutralino / chargino contribution , in the light bino limit ( i.e. @xmath219 ) , it can be approximated by@xcite @xmath220 for @xmath221 with @xmath222 being smuon mass . so combining the two contributions together , one can learn that a light @xmath0 along with large @xmath54 and/or light smuon with moderate @xmath87 are favored to dilute the discrepancy . because more parameters are involved in the constraints on the supersymmetric models , we consider following additional constraints to further limit their parameters : * direct bounds on sparticle masses from the lep1 , the lep2 and the tevatron experiments @xcite . * the lep1 bound on invisible z decay @xmath223 ; the lep2 bound on neutralino production @xmath224 and @xmath225@xcite . * dark matter constraints from the wmap relic density 0.0975 @xmath226 0.1213 @xcite . note that among the above constraints , the constraint ( 2 ) on higgs sector and the constraint ( c ) on neutralino sector are very important . this is because in the supersymmetric models , the sm - like higgs is upper bounded by about @xmath227 at tree level and by about @xmath228 at loop level , and that the relic density restricts the lsp annihilation cross section in a certain narrow range . 
in our analysis of the nmssm , we calculate the constraints ( 3 ) and ( 5 - 7 ) by ourselves and utilize the code nmssmtools @xcite to implement the rest constraints . we also extend nmssmtools to the nmssm to implement the constraints . for the extension , the most difficult thing we faced is how to adapt the code micromegas@xcite to the nmssm case . we solve this problem by noting the following facts : * as we mentioned before , the nmssm is actually same as the nmssm with the trilinear singlet term setting to zero . so we can utilize the model file of the nmssm as the input of the micromegas and set @xmath229 . * since in the nmssm , the lsp is too light to annihilate into higgs pairs , there is no need to reconstruct the effective higgs potential to calculate precisely the annihilation channel @xmath230 with @xmath61 denoting any of higgs bosons@xcite . we thank the authors of the nmssmtools for helpful discussion on this issue when we finish such extension@xcite . with the above constraints , we perform four independent random scans over the parameter space of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively . we vary the parameters in following ranges : @xmath231 for the type - ii 2hdm , @xmath232 for the l2hdm , @xmath233 for the nmssm , and @xmath234 for the nmssm . in performing the scans , we note that for the nmssm and the nmssm , some constraints also rely on the gaugino masses and the soft breaking parameters in the squark sector and the slepton sector . since these parameters affect little on the properties of @xmath0 , we fix them to reduce the number of free parameters in our scan . for the squark sector , we adopt the @xmath235 scenario which assumes that the soft mass parameters for the third generation squarks are degenerate : @xmath236 800 gev , and that the trilinear couplings of the third generation squarks are also degenerate , @xmath237 with @xmath238 . for the slepton sector , we assume all the soft - breaking masses and trilinear parameters to be 100 gev . this setting is necessary for the nmssm since this model is difficult to explain the muon anomalous moment at @xmath239 level for heavy sleptons@xcite . finally , we assume the grand unification relation @xmath240 for the gaugino masses with @xmath241 being fine structure constants of the different gauge group . with large number of random points in the scans , we finally get about @xmath242 , @xmath243 , @xmath244 and @xmath242 samples for the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively which survive the constraints and satisfy @xmath245 . analyzing the properties of the @xmath0 indicates that for most of the surviving points in the nmssm and the nmssm , its dominant component is the singlet field ( numerically speaking , @xmath246 ) so that its couplings to the sm fermions are suppressed@xcite . our analysis also indicates that the main decay products of @xmath0 are @xmath247 for the l2hdm@xcite , @xmath248 ( dominant ) and @xmath247 ( subdominant ) for the type - ii 2hdm , the nmssm and the nmssm , and in some rare cases , neutralino pairs in the nmssm@xcite . in fig.[fig4 ] , we project the surviving samples on the @xmath249 plane . this figure shows that the allowed range of @xmath54 is from @xmath250 to @xmath251 in the type - ii 2hdm , and from @xmath252 to @xmath253 in the l2hdm . 
just as we introduced before , the lower bounds of @xmath254 come from the fact that we require the models to explain the muon anomalous moment , while the upper bound is due to we have imposed the constraint from the lep process @xmath255 , which have limited the upper reach of the @xmath256 coupling for light @xmath61 @xcite(for the dependence of @xmath256 coupling on @xmath54 , see sec . this figure also indicates that for the nmssm and the nmssm , @xmath54 is upper bounded by @xmath257 . for the nmssm , this is because large @xmath87 can suppress the dark matter mass to make its annihilation difficult ( see @xcite and also sec . ii ) , but for the nmssm , this is because we choose a light slepton mass so that large @xmath54 can enhance @xmath208 too significantly to be experimentally unacceptable . we checked that for the slepton mass as heavy as @xmath258 , @xmath259 is still allowed for the nmssm . in fig.[fig5 ] and fig.[fig6 ] , we show the branching ratios of @xmath260 and @xmath261 respectively . fig.[fig5 ] indicates , among the four models , the type - ii 2hdm predicts the largest ratio for @xmath260 with its value varying from @xmath262 to @xmath263 . the underlying reason is in the type - ii 2hdm , the @xmath264 coupling is enhanced by @xmath54 ( see fig.[fig4 ] ) , while in the other three model , the coupling is suppressed either by @xmath265 or by the singlet component of the @xmath0 . fig.[fig6 ] shows that the l2hdm predicts the largest rate for @xmath266 with its value reaching @xmath5 in optimum case , and for the other three models , the ratio of @xmath261 is at least about one order smaller than that of @xmath267 . this feature can be easily understood from the @xmath268 coupling introduced in sect . we emphasize that , if the nature prefers a light @xmath0 , @xmath260 and/or @xmath269 in the type - ii 2hdm and the l2hdm will be observable at the gigaz . then by the rates of the two decays , one can determine whether the type - ii 2hdm or the l2hdm is the right theory . on the other hand , if both decays are observed with small rates or fail to be observed , the singlet extensions of the mssm are favored . in fig.[fig7 ] , we show the rate of @xmath3 as the function of @xmath270 . this figure indicates that the branching ratio of @xmath121 can reach @xmath271 , @xmath272 , @xmath273 and @xmath274 for the optimal cases of the type - ii 2hdm , the l2hdm , the nmssm and the nmssm respectively , which implies that the decay @xmath121 will never be observable at the gigaz if the studied model is chosen by nature . the reason for the smallness is , as we pointed out before , that the decay @xmath121 proceeds only at loop level . comparing the optimum cases of the type - ii 2hdm , the nmssm and the nmssm shown in fig.5 - 7 , one may find that the relation @xmath275 holds for any of the decays . this is because the decays are all induced by the yukawa couplings with similar structure for the models . in the supersymmetric models , the large singlet component of the light @xmath0 is to suppress the yukawa couplings , and the @xmath0 in the nmssm has more singlet component than that in the nmssm . next we consider the decay @xmath11 , which , unlike the above decays , depends on the higgs self interactions . in fig.[fig8 ] we plot its rate as a function of @xmath270 and this figure indicates that the @xmath276 may be the largest among the ratios of the exotic @xmath1 decays , reaching @xmath277 in the optimum cases of the type - ii 2hdm , the l2hdm and the nmssm . 
the underlying reason is , in some cases , the intermediate state @xmath119 in fig.[fig3 ] ( a ) may be on - shell . in fact , we find this is one of the main differences between the nmssm and the nmssm , that is , in the nmssm , @xmath119 in fig.[fig3 ] ( a ) may be on - shell ( corresponds to the points with large @xmath278 ) while in the nmssm , this seems impossible . so we conclude that the decay @xmath11 may serve as an alternative channel to test new physics models , especially it may be used to distinguish the nmssm from the nmssm if the supersymmetry is found at the lhc and the @xmath11 is observed at the gigaz with large rate . before we end our discussion , we note that in the nmssm , the higgs boson @xmath0 may be lighter than @xmath279 without conflicting with low energy data from @xmath178 decays and the other observables ( see fig.[fig4]-[fig8 ] ) . in this case , @xmath0 is axion - like as pointed out in @xcite . we checked that , among the rare @xmath1 decays discussed in this paper , the largest branching ratio comes from @xmath280 which can reach @xmath281 . since in this case , the decay product of @xmath0 is highly collinear muon pair , detecting the decay @xmath280 may need some knowledge about detectors , which is beyond our discussion . in this paper , we studied the rare @xmath1-decays @xmath2 ( @xmath7 ) , @xmath282 and @xmath4 in the type - ii 2hdm , lepton - specific 2hdm , nmssm and nmssm , which predict a light cp - odd higgs boson @xmath0 . in the parameter space allowed by current experiments , the branching ratio can be as large as @xmath5 for @xmath118 , @xmath8 for @xmath3 and @xmath9 for @xmath4 , which implies that the decays @xmath2 and @xmath283 may be accessible at the gigaz option . since different models predict different size of branching ratios , these decays can be used to distinguish different model through the measurement of these rare decays . this work was supported in part by hastit under grant no . 2009hastit004 , by the national natural science foundation of china ( nnsfc ) under grant nos . 10821504 , 10725526 , 10635030 , 10775039 , 11075045 and by the project of knowledge innovation program ( pkip ) of chinese academy of sciences under grant no . . for some reviews , see , e.g. , m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod . a * 19 * , 159 ( 2004 ) ; j. m. yang , arxiv:1006.2594 . j. i. illana , m. masip , 67 , 035004 ( 2003 ) ; j. cao , z. xiong , j. m. yang , 32 , 245 ( 2004 ) . d. atwood _ et al_. , 66 , 093005 ( 2002 ) . j. kalinowski , and s. pokorski , 219 , 116 ( 1989 ) ; a. djouadi , p. m. zerwas and j. zunft , 259 , 175 ( 1991 ) ; a. djouadi , j. kalinowski , and p. m. zerwas , z. phys . c * 54 * , 255 ( 1992 ) . m. krawczyk , _ et al . _ , 19 , 463 ( 2001 ) ; 8 , 495 ( 1999 ) . j. f. gunion , g. gamberini and s. f. novaes , 38 , 3481 ( 1988 ) ; thomas j. weiler and tzu - chiang yuan , 318 , 337 ( 1989 ) ; a. djouadi , _ et al . _ , 1 , 163 ( 1998)[hep - ph/9701342 ] . d. chang and w. y. keung , phys . lett . * 77 * , 3732 ( 1996 ) . e. keith and e. ma , 57 , 2017 ( 1998 ) ; m. a. perez , g. tavares - velasco and j. j. toscano , int . j. mod.phys . a * 19 * , 159 ( 2004 ) . f. larios , g. tavares - velasco and c. p. yuan , 64 , 055004 ( 2001 ) ; 66 , 075006 ( 2002 ) . a. djouadi , _ et al . _ , 10 , 27 ( 1999 ) [ hep - ph/9903229 ] . for a detailed introduction of the nmssm , see f. franke and h. fraas , int . j. mod . a * 12 * ( 1997 ) 479 ; for a recent review of the nmssm , see for example , u. 
ellwanger , c. hugonie , and a. m. teixeira , arxiv : 0910.1785 . see , e.g. , j. r. ellis , j. f. gunion , h. e. haber , l. roszkowski and f. zwirner , phys . rev . d * 39 * ( 1989 ) 844 ; m. drees , int . j. mod . phys . a * 4 * ( 1989 ) 3635 ; u. ellwanger , m. rausch de traubenberg and c. a. savoy , phys . b * 315 * ( 1993 ) 331 ; nucl . b * 492 * ( 1997 ) 21 ; d.j . miller , r. nevzorov , p.m. zerwas , 681 , 3 ( 2004 ) . c. panagiotakopoulos , k. tamvakis , 446 , 224 ( 1999 ) ; 469 , 145 ( 1999 ) ; c. panagiotakopoulos , a. pilaftsis , 63 , 055003 ( 2001 ) ; a. dedes , _ et al . _ , 63 , 055009 ( 2001 ) ; a. menon , _ et al . _ , 70 , 035005 ( 2004 ) ; v. barger , _ et al . _ , 630 , 85 ( 2005 ) . c. balazs , _ et al . _ , 0706 , 066 ( 2007 ) . b. a. dobrescu , k. t. matchev , 0009 , 031 ( 2000 ) ; a. arhrib , k. cheung , t. j. hou , k. w. song , hep - ph/0611211 ; 0703 , 073 ( 2007 ) ; x. g. he , j. tandean , and g. valencia , 98 , 081802 ( 2007 ) ; 0806 , 002 ( 2008 ) ; f. domingo _ et al_. , 0901 , 061 ( 2009 ) ; gudrun hiller , 70 , 034018 ( 2004 ) ; r. dermisek , and john f. gunion , 75 , 075019 ( 2007 ) ; 79 , 055014 ( 2009 ) ; 81 , 055001 ( 2010 ) ; r. dermisek , john f. gunion , and b. mcelrath , 76 , 051105 ( 2007 ) ; z. heng , _ et al_. , 77 , 095012 ( 2008 ) ; a. belyaev _ et al_. , 81 , 075021 ( 2010 ) ; d. das and u. ellwanger , arxiv:1007.1151 [ hep - ph ] . s. andreas , o. lebedev , s. ramos - sanchez and a. ringwald , arxiv:1005.3978 [ hep - ph ] . j. f. gunion , jhep * 0908 * , 032 ( 2009 ) ; r. dermisek and j. f. gunion , phys . rev . d * 81 * , 075003 ( 2010 ) . r. dermisek and j. f. gunion , phys . lett . * 95 * , 041801 ( 2005 ) ; phys . d * 73 * , 111701 ( 2006 ) . j. cao , h. e. logan , j. m. yang , 79 , 091701 ( 2009 ) . j. cao , p. wan , l. wu , j. m. yang , 80 , 071701 ( 2009 ) . j. f. gunion and h. e. haber , 67 , 075019 ( 2003 ) . r. m. barnett , _ et al . _ , phys . b * 136 * , 191 ( 1984 ) ; r. m. barnett , g. senjanovic and d. wyler , phys . d * 30 * , 1529 ( 1984 ) ; y. grossman , nucl . b * 426 * , 355 ( 1994 ) . h. s. goh , l. j. hall and p. kumar , jhep * 0905 * , 097 ( 2009 ) ; a. g. akeroyd and w. j. stirling , nucl . b * 447 * , 3 ( 1995 ) ; a. g. akeroyd , phys . b * 377 * , 95 ( 1996 ) ; h. e. logan and d. maclennan , phys . rev . d * 79 * , 115022 ( 2009 ) ; m. aoki , _ et al . _ , arxiv:0902.4665 [ hep - ph ] . v. barger , p. langacker , h. s. lee and g. shaughnessy , phys . d * 73 * , 115010 ( 2006 ) . s. hesselbach , _ et . _ , arxiv:0810.0511v2 [ hep - ph ] . de vivie and p. janot [ aleph collaboration ] , pa13 - 027 contribution to the international conference on high energy physics , warsaw , poland , 2531 july 1996 ; j. kurowska , o. grajek and p. zalewski [ delphi collaboration ] , cern - open-99 - 385 . [ aleph collaboration and delphi collaboration and l3 collaboration ] , phys . rept . * 427 * , 257 ( 2006 ) . j. cao and j. m. yang , jhep * 0812 * , 006 ( 2008 ) . m. krawczyk and d. temes , eur . j. c * 44 * , 435 ( 2005 ) . g. altarelli and r. barbieri , 253 , 161 ( 1991 ) ; m. e. peskin , t. takeuchi , 46 , 381 ( 1992 ) . c. amsler , _ et al . _ , ( particle data group ) , 667 , 1 ( 2008 ) . o. deschamps , s. descotes - genon , s. monteil , v. niess , s. tjampens and v. tisserand , arxiv:0907.5135 [ hep - ph ] . s. su and b. thomas , phys . d * 79 * , 095014 ( 2009 ) . g. abbiendi , _ et al . _ , eur . phys . j. c * 32 * , 453 ( 2004 ) . m. davier , _ et al . _ , 66 , 1 ( 2010 ) . k. cheung , _ et al . _ , phys . 
d * 64 * , 111301 ( 2001 ) . k. cheung and o. c. w. kong , phys . d * 68 * , 053003 ( 2003 ) . t. besmer , c. greub , t.hurth , 609 , 359 ( 2001 ) ; f. borzumati , _ et al . _ , 62 , 075005(2000 ) . j. cao , k. i. hikasa , w. wang , j. m. yang and l. x. yu , phys . d * 82 * , 051701 ( 2010 ) [ arxiv:1006.4811 [ hep - ph ] ] . j. f. gunion , _ et . d * 73 * , 015011 ( 2006 ) . martin and j. d. wells , phys . d * 64 * , 035003 ( 2001 ) . j. abdallah _ et al . _ , eur . j. c * 31 * , 421 ( 2004 ) ; g. abbiendi _ et al . _ , eur . j. c * 35 * , 1 ( 2004 ) . j. dunkley _ et al . _ [ wmap collaboration ] , astrophys . j. suppl . * 180 * , 306 ( 2009 ) [ arxiv:0803.0586 [ astro - ph ] ] . u. ellwanger _ et al . _ , 02 , 066 ( 2005 ) . g. belanger , f. boudjema , a. pukhov and a. semenov , comput . commun . * 174 * , 577 ( 2006 ) ; comput . phys . commun . * 176 * , 367 ( 2007 ) . g. belanger , f. boudjema , c. hugonie , a. pukhov and a. semenov , jcap * 0509 * , 001 ( 2005 ) ."""
ARTICLE_MAGNET = r"""it is well known that the classical magnetoresistance ( mr ) in metals or semiconductors with a closed free electron fermi surface increases quadratically with increasing magnetic field @xmath2 for @xmath3 and saturates when @xmath4 . here @xmath5 is the zero - magnetic - field mobility . hence , the extraordinarily high and linear mr ( lmr ) , which breaks this familiar rule , has been gaining much attention as soon as its discovery . in the past decade , this unexpected lmr has been reported in silver chalcogenide,@xcite indium antimonide,@xcite silicon,@xcite mnas - gaas composite material,@xcite and graphene.@xcite kapitza s linear law@xcite indicates that the metal shows a magnetoresistance linear in perpendicular magnetic field when it has an open fermi surface and a mean free path longer than the electronic larmor radius . recently , another two models , irrespective of the open fermi surface , have been constructed to provide possible mechanisms for the lmr phenomenon . abrikosov suggested a quantum - limit origin of lmr for the homogenous system with a gapless linear energy spectrum.@xcite his model requires that landau levels are well formed and the carrier concentration is small that all electrons occupy only the lowest landau band . alternatively , parish and littlewood developed a classical model without involving linear spectrum.@xcite ignoring the concrete microscopic mechanism , they attributed this unusual mr to the mobility fluctuations in a strongly inhomogenous system . topological insulators@xcite ( tis ) are novel materials with a full energy gap in bulk , while there are gapless surface states . due to its unique band structure with only one helical dirac cone and linear energy dispersion,@xcite the surface states of the ti bi@xmath0se@xmath1 become an excellent platform for the study of quantum - limit lmr . the recent experiment in this flat surface system , however , reported that a large positive mr , which becomes very linear above a characteristic field of @xmath6@xmath7@xmath8 t , was observed even in an opposite situation where the carrier sheet density is high that electrons occupy more than one landau levels.@xcite moreover , they found that raising temperature to room temperature almost has no influence on the observed lmr . it is striking that this observation is in conflict with abrikosov s model and also with the classical parish - littlewood model . so far a reliable theoretical scheme capable of explaining this novel experiment has still been lacking . in this paper , we generalize the balance - equation approach@xcite to a system modeling the surface states of a three - dimensional ti to investigate the two - dimensional magnetotransport in it . we find that a positive , nonsaturating and dominantly linear magnetoresistance can appear within quite wide magnetic - field range in the ti surface state having a positive and finite effective g - factor . 
this linear magnetoresistance shows up in the system of high carrier concentration and low mobility when electrons are in extended states and spread over many smeared landau levels , and persists up to room temperature , providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite we consider the surface state of a bi@xmath0se@xmath1-type large bulk gap ti in the @xmath9-@xmath10 plane under the influence of a uniform magnetic field @xmath11 applied along the @xmath12 direction.@xcite following the experimental observation,@xcite we assume that the fermi energy locates in the gap of the bulk band and above the dirac point , i.e. the surface carriers are electrons . further , the separations of the fermi energy from the bottom of bulk band and dirac point are much larger than the highest temperature ( @xmath13 ) considered in this work . hence , the contribution from the bulk band to the magnetotransport is negligible . these electrons , scattered by randomly distributed impurities and by phonons , are driven by a uniform in - plane electric field @xmath14 in the topological surface . the hamiltonian of this many - electron and phonon system consists of an electron part @xmath15 , a phonon part @xmath16 , and electron - impurity and electron - phonon interactions @xmath17 and @xmath18 : @xmath19 here , the electron hamiltonian is taken in the form @xmath20 , \ ] ] in which @xmath21 , @xmath22 , @xmath23 and @xmath24 , stand , respectively , for the canonical momentum , coordinate , momentum and spin operators of the @xmath25th electron having charge @xmath26 , @xmath27 is the vector potential of the perpendicular magnetic field @xmath28 in the landau gauge , @xmath29 is the fermi velocity , @xmath30 is the effective g - factor of the surface electron , and @xmath31 is the bohr magneton with @xmath32 the free electron mass . the sum index @xmath25 in eq.([helectron ] ) goes over all electrons of total number @xmath33 in the surface state of unit area . in the frame work of balance equation approach,@xcite the two - dimensional center - of - mass ( c.m . ) momentum and coordinate @xmath34 and @xmath35 , and the relative - electron momenta and coordinates @xmath36 and @xmath37 are introduced to write the hamiltonian @xmath15 into the sum of a single - particle c.m . part @xmath38 and a many - particle relative - electron part @xmath39 : @xmath40 , with @xmath41.\end{aligned}\ ] ] in this , @xmath42 is the canonical momentum of the center - of - mass and @xmath43 is the canonical momentum for the @xmath25th relative electron . here we have also introduced c.m . spin operators @xmath44 and @xmath45 . the commutation relations between the c.m . spin operators @xmath46 and @xmath47 and the spin operators @xmath48 , @xmath49 and @xmath50 of the @xmath25th electron are of order of @xmath51 : @xmath52= n^{-1}2\,{\rm i}\,\varepsi lon_{\beta_1\beta_2\beta_3}\sigma_j^{\beta_3}$ ] with @xmath53 . therefore , for a macroscopic large @xmath33 system , the c.m . part @xmath38 actually commutes with the relative - electron part @xmath54 in the hamiltonian , i.e. the c.m . motion and the relative motion of electrons are truly separated from each other . the couplings between the two emerge only through the electron impurity and electron phonon interactions . furthermore , the electric field @xmath55 shows up only in @xmath38 . 
and , in view of @xmath56={\rm i}\delta_{\alpha \beta}(\delta_{ij}-1/n)\simeq { \rm i}\delta_{\alpha\beta}\delta_{ij}$ ] , i.e. the relative - electron momenta and coordinates can be treated as canonical conjugate variables , the relative - motion part @xmath54 is just the hamiltonian of @xmath33 electrons in the surface state of ti in the magnetic field without the presence of the electric field . in terms of the c.m . coordinate @xmath57 and the relative electron density operator @xmath58 , the electron impurity and electron phonon interactions can be written as@xcite @xmath59 here @xmath60 and @xmath61 are respectively the impurity potential ( an impurity at randomly distributed position @xmath62 ) and electron phonon coupling matrix element in the plane - wave representation , and @xmath63 with @xmath64 and @xmath65 being the creation and annihilation operators for a phonon of wavevector @xmath66 in branch @xmath67 having frequency @xmath68 . velocity ( operator ) @xmath69 is the time variation of its coordinate : @xmath70= v_{\rm f}(\sigma_{\rm c}^y\ , \hat{i}-\sigma_{\rm c}^x\ , \hat{j})$ ] . to derive a force - balance equation for steady state transport we consider the heisenberg equation for the rate of change of the c.m . canonical momentum @xmath71 : @xmath72= - n e({\bm v}\times { \bm b})- n e{\bm e}+{\bm { f}}_{\rm i}+{\bm { f}}_{\rm p},\ ] ] in which the frictional forces @xmath73 and @xmath74 share the same expressions as given in ref .. the statistical average of the operator equation can be determined to linear order in the electron impurity and electron phonon interactions @xmath17 and @xmath18 with the initial density matrix @xmath75 at temperature @xmath76 when the in - plane electric field @xmath77 is not strong . for steady - transport states we have @xmath78 , leading to a force - balance equation of the form @xmath79 here @xmath80 , the statistically averaged velocity of the moving center - of - mass , is identified as the average rate of change of its position , i.e. the drift velocity of the electron system driven by the electric field @xmath77 , and @xmath81 and @xmath82 are frictional forces experienced by the center - of - mass due to impurity and phonon scatterings : @xmath83,\label{fp}\end{aligned}\ ] ] in which @xmath84 is the bose distribution function , @xmath85 , and @xmath86 stands for the imaginary part of the fourier spectrum of the relative - electron density correlation function defined by @xmath87\big\rangle_{0},\ ] ] where @xmath88 and @xmath89 denotes the statistical averaging over the initial density matrix @xmath90.@xcite the force - balance equation describes the steady - state two - dimensional magnetotransport in the surface state of a ti . note that the frictional forces @xmath81 and @xmath82 are in the opposite direction of the drift velocity @xmath91 and their magnitudes are functions of @xmath92 only . with the drift velocity @xmath93 in the @xmath9 direction , the force - balance equation eq . yields a transverse resistivity @xmath94 , and a longitudinal resistivity @xmath95 . 
the linear one is in the form @xmath96 for calculating the electron density correlation function @xmath97 we proceed in the landau representation.@xcite the landau levels of the single - particle hamiltonian @xmath98 of the relative - electron system in the absence of electric field are composed of a positive `` @xmath99 '' and a negative `` @xmath100 '' branch@xcite @xmath101 with @xmath102 and @xmath103 , and a zero ( @xmath104 ) level @xmath105 the corresponding landau wave functions are @xmath106 and @xmath107 for @xmath108 ; and @xmath109 for @xmath104 . here @xmath110 is the wavevector of the system along @xmath9 direction ; @xmath111 with @xmath112 ; and @xmath113 is the harmonic oscillator eigenfunction with @xmath114 being the hermite polynomial , @xmath115 , and @xmath116 . each landau level contains @xmath117 electron states for system of unit surface area . the positive branch @xmath118 and the @xmath104 level @xmath119 of the above energy spectra are indeed quite close to those of the surface states in the bulk gap of bi@xmath0se@xmath1-family materials derived from microscopic band calculation.@xcite the landau levels are broadened due to impurity , phonon and electron - electron scatterings . we model the imaginary part of the retarded green s function , or the density - of - states , of the broadened landau level @xmath120 ( written for `` + ' ' -branch and @xmath104 levels ) , using a gaussian - type form:@xcite @xmath121,\ ] ] with a half - width @xmath122 of the form:@xcite @xmath123^{1/2}$ ] . here @xmath124 is the single - particle lifetime and @xmath125 is the cyclotron frequency of linear - energy - dispersion system with @xmath126 being the zero - temperature fermi level . using a semi - empirical parameter @xmath127 to relate @xmath124 with the transport scattering time @xmath128 , and expressing @xmath129 with the zero - field mobility @xmath5 at finite temperature,@xcite we can write the landau - level broadening as @xmath130^{1/2}.\ ] ] in the present study we consider the case of @xmath120-doping , i.e. the fermi level is high enough above the energy zero of the dirac cone in the range of `` + ' ' -branch levels and the states of `` @xmath100''-branch levels are completely filled , that they are irrelevant to electron transport . special attention has to be paid to the @xmath104 level , since , depending on the direction of exchange potential the effective g - factor of a ti surface state , @xmath30 , can be positive , zero or negative.@xcite the sign and magnitude of the effective g - factor determines how many states of the zero level should be included in or excluded from the available states for electron occupation in the case of @xmath120-doping at a magnetic field . ( i ) if @xmath131 , the @xmath104 level center is exactly at @xmath132 and the system is electron - hole symmetric . the total number of negative energy states ( including the states of the lower half of the @xmath104 level and states of the @xmath100"-branch levels ) and that of positive energy states ( including the states of the upper half of the @xmath104 level and states of the @xmath99"-branch levels ) do not change when changing magnetic field . therefore , the lower - half negative energy states of this level are always filled and the upper - half positive - energy states of it are available for the occupation of particles which are counted as electrons participating in transport in the case of @xmath120-doping . 
( ii ) for a finite positive @xmath133 , the @xmath104 level @xmath134 moves downward to negative energy and its distance to the nearest @xmath100"-branch level is @xmath135 closer than to the nearest + " -branch level at finite magnetic field strength @xmath2 . this is equivalent to the opening of an increasingly enlarged ( with increasing @xmath2 ) energy gap between the + " -branch states and the states of the zero - level and the @xmath100"-branch levels . the opening of a sufficient energy gap implies that with increasing magnetic field the states in the + " -branch levels would no longer shrink into the zero - level , and thus the @xmath104 level should be completely excluded from the conduction band , i.e. only particles occupying the + " -branch states are counted as electrons participating in transport in the case of @xmath120-doping , when the magnetic field @xmath2 gets larger than a certain value ( depending on the magnitude of @xmath30 ) . ( iii ) for a finite negative @xmath136 , the @xmath104 level @xmath134 moves upward to positive energy and an increasingly enlarged energy gap will be opened between the states of the zero - level and the + " -branch and the states of @xmath100"-branch levels , and particles occupying the @xmath104 level and + " -branch states are electrons participating in transport when the magnetic field @xmath2 gets larger than a certain value . as a result , the experimentally accessible sheet density @xmath33 of electrons participating in transport is related to the fermi energy @xmath137 by the following equation valid at finite @xmath30 for the magnetic field @xmath2 larger than a certain value : @xmath138 in which @xmath139 + 1\}^{-1}$ ] is the fermi distribution function at temperature @xmath76 and the summation index @xmath120 goes over @xmath140 for @xmath133 , or @xmath141 for @xmath136 . in the case of @xmath131 , @xmath142\ ] ] valid for arbitrary magnetic field , in which @xmath143 . the imaginary part of relative - electron density correlation function in the presence of a magnetic field , @xmath86 , can be expressed in the landau representation as@xcite @xmath144 in which the transform factor @xmath145 ^ 2,\end{aligned}\ ] ] with @xmath146 , @xmath147 , @xmath148 , and @xmath149 being associated laguerre polynomials . the landau - representation correlation function @xmath150 in eq.([piqw ] ) can be constructed with the imaginary part of the retarded green s function @xmath151 , or the density - of - states , of the @xmath120th landau level as@xcite @xmath152\nonumber\\ & \hspace{1.2cm}\times{\rm im}g_n(\epsilon+\omega){\rm im}g_{n'}(\epsilon).\end{aligned}\ ] ] the summation indices @xmath120 and @xmath153 in eq.([piqw ] ) are taken over @xmath140 for @xmath133 , or @xmath154 for @xmath136 . in the case of @xmath131 , eq.([piqw ] ) still works and the summation indices @xmath120 and @xmath153 go over @xmath154 but with @xmath155 replaced by @xmath156 in eq.([p2nn ] ) . numerical calculations are performed for the magnetoresistivity @xmath157 of surface state in a uniform ti bi@xmath0se@xmath1 . at zero temperature the elastic scattering contributing to the resistivity is modeled by a coulomb potential due to charged impurities:@xcite @xmath158 with @xmath159 being the impurity density , which is determined by the zero - magnetic - field mobility @xmath5 . at temperatures higher than @xmath160,@xcite phonon scatterings play increasingly important role and the dominant inelastic contribution comes from optical phonons . 
for this polar material , the scattering by optical phonons via the deformation potential can be neglected . hence , we take account of inelastic scattering from optical phonons via frhlich coupling : @xmath161 . in the numerical calculation we use the following parameters:@xcite fermi velocity @xmath162 , static dielectric constant @xmath163 , optical dielectric constant @xmath164 , and phonon energy @xmath165 . the broadening parameter is taken to be @xmath166 . as a function of the magnetic field @xmath2 having different effective g - factors : @xmath167 and @xmath168 for a ti surface system with electron sheet density @xmath169 in the cases of zero - magnetic - field mobility @xmath170 ( a ) and @xmath171 ( b ) . several integer - number positions of filling factor @xmath172 are marked in ( b).,scaledwidth=40.0% ] fig.[diffg ] shows the calculated magnetoresistivity @xmath157 versus the magnetic field strength @xmath2 for a ti surface system with electron sheet density @xmath169 but having different effective g - factors : @xmath167 and @xmath168 for two values of zero - magnetic - field mobility @xmath170 and @xmath171 , representing different degree of landau - level broadening . in the case without zeeman splitting ( @xmath131 ) the resistivity @xmath157 exhibits almost no change with changing magnetic field up to 10 t , except the shubnikov - de haas ( sdh ) oscillation showing up in the case of @xmath171 . this kind of magnetoresistance behavior was indeed seen experimentally in the electron - hole symmetrical massless system of single - layer graphene.@xcite in the case of a positive g - factor , @xmath173 , the magnetoresistivity increases linearly with increasing magnetic field ; while for a negative g - factor , @xmath174 , the magnetoresistivity decreases linearly with increasing magnetic field . is shown as a function of the magnetic field @xmath2 for different values of zero - magnetic - field mobility : ( a ) @xmath175 , ( b ) @xmath176 , ( c ) @xmath177 , ( d ) @xmath178 , ( e ) @xmath179 , and ( f ) @xmath180 . the inset of ( a ) illustrates the same for a larger magnetic - field range @xmath181 . the filling factor @xmath182 is plotted versus the magnetic field in ( f ) ; and several integer - number positions of @xmath182 are also marked in ( d ) and ( e ) . here the surface electron density @xmath169 and the lattice temperature @xmath183.,scaledwidth=47.0% ] in the following we will give more detailed examination on the linearly increasing magnetoresistance in the positive @xmath30 case . fig.[rhob ] shows the calculated resistivity @xmath157 versus the magnetic field strength @xmath2 at lattice temperature @xmath183 for system of carrier sheet density @xmath169 and @xmath173 , having different zero - field mobility @xmath184 and @xmath180 . all resistivity curves for mobility @xmath185 exhibit clear linearity in the magnetic - field range and appear no tendency of saturation at the highest field shown in the figure . especially , for the case @xmath170 , the linear behavior extends even up to the magnetic field of @xmath186 , as illustrated in the inset of fig.[rhob](a ) . this feature contradicts the classical mr which saturates at sufficiently large magnetic field @xmath187 . 
note that here we only present the calculated @xmath157 for magnetic field @xmath2 larger than @xmath188 t , for which a sufficient energy gap @xmath135 is assumed to open that with further increase of the magnetic field the states in the `` + ' ' -branch levels no longer shrink into the zero level and thus it should be excluded from the conduction band . this is of course not true for very weak magnetic field . when @xmath189 the energy gap @xmath190 , the situation becomes similar to the case of @xmath131 : the whole upper half of the zero - level states are available to electron occupation and we should have a flat resistivity @xmath157 when changing magnetic field . with increasing @xmath2 the portion of the zero - level states available to conduction electrons decreases until the magnetic field reaches @xmath191 . as a result the resistivity @xmath157 should exhibit a crossover from a flat changing at small @xmath2 to positively linear increasing at @xmath192 . this is just the behavior observed in the ti bi@xmath0se@xmath1.@xcite note that in the case of @xmath170 , the broadened landau - level widths are always larger than the neighboring level interval : @xmath193 , which requires @xmath194 ^ 2 $ ] , even for the lowest landau level @xmath195 , i.e. the whole landau - level spectrum is smeared . with increasing the zero - field mobility the magnitude of resistivity @xmath157 decreases , and when the broadened landau - level width becomes smaller than the neighboring level interval , @xmath196 , a weak sdh oscillation begin to occur around the linearly - dependent average value of @xmath157 at higher portion of the magnetic field range , as seen in fig.[rhob](c ) , ( d ) and ( e ) for @xmath197 and @xmath198 . on the other hand , in the case of large mobility , e.g. @xmath199 , where the broadened landau - level widths @xmath200 are much smaller than the neighboring level interval even for level index @xmath120 as large as @xmath201 , the magnetoresistivity shows pronounced sdh oscillation and the linear - dependent behavior disappears , before the appearance of quantum hall effect,@xcite as shown in fig.[rhob](f ) . abrikosov s model for the lmr requires the applied magnetic field large enough to reach the quantum limit at which all the carriers are within the lowest landau level,@xcite while it is obvious that more than one landau levels are occupied in the experimental samples in the field range in which the linear and non - saturating magnetoresistivity was observed.@xcite for the given electron surface density @xmath202 , the number of occupied landau levels , or the filling factor @xmath172 , at different magnetic fields is shown in fig.[rhob](f ) , as well as in the fig.[rhob](d ) and ( e ) , where the integer - number positions of @xmath203 , i.e. filling up to entire @xmath182 landau levels , coincide with the minima of the density - of - states or the dips of sdh oscillation . this is in contrast with @xmath131 case , where the integer number of @xmath203 , which implies a filling up to the center position of the @xmath182th landau levels , locates at a peak of sdh oscillation , as shown in fig.[diffg]b . the observed sdh oscillations in the bi@xmath0se@xmath1 nanoribbon exhibiting nonsaturating surface lmr in the experiment@xcite favor the former case : a finite positive effective @xmath133 . 
is plotted as a function of the surface electron density @xmath33 at magnetic field @xmath204 : ( a ) at different values of zero - field mobility @xmath5 , and ( b ) at different values of zero - field conductivity @xmath205.,scaledwidth=40.0% ] at various lattice temperatures . here the zero - magnetic - field mobility at zero temperature is @xmath206.,scaledwidth=35.0% ] next , we examine the density - dependence of the linear magnetoresistivity . to compare with abrikosov s quantum magnetoresistance which suggests a @xmath207 behavior,@xcite we show the calculated @xmath208 for above lmr versus the carrier sheet density @xmath33 in fig.[rhon ] at fixed magnetic field @xmath209 t . the mobility is taken respectively to be @xmath210 and @xmath211m@xmath212/vs to make the resistivity in the lmr regime . a clearly linear dependence of @xmath213 on the surface density @xmath33 is seen in all cases , indicating that this non - saturating linear resistivity is almost inversely proportional to the carrier density . in the figure we also show @xmath208 versus @xmath33 under the condition of different given conductivity @xmath214 and @xmath215 . in this case the half - width @xmath216 is independent of surface density . the linear dependence still holds , indicating that this linear behavior is not sensitive to the modest @xmath33-dependence of landau level broadening @xmath216 as long as the system is in the overlapped landau level regime . from the above discussion , it is obvious that lmr shows up in the system having overlapped landau levels and the separation of landau levels makes the mr departure from the linear increase . at high temperature , the thermal energy would smear the level separation and phonon scatterings further broaden landau levels . hence , it is believed that this lmr will be robust against raising temperature . this is indeed the case as seen in fig.[rhot ] , where we plot the calculated magnetoresistivity @xmath157 for the above system with zero - temperature linear mobility @xmath217m@xmath212/vs versus the magnetic field at different lattice temperatures . we can see that raising temperature to room temperature has little effect on the linearity of mr . due to the decreased mobility at higher temperature from phonon scattering , the weak sdh oscillation on the linear background tends to vanish . these features are in good agreement with the experimental report.@xcite in summary , we have studied the two - dimensional magnetotransport in the flat surface of a three - dimensional ti , which arises from the surface states with a wavevector - linear energy dispersion and a finite , positive zeeman splitting within the bulk energy gap . when the level broadening is comparable to or larger than the landau - level separation and the conduction electrons spread over many landau levels , a positive , dominantly linear and non - saturating magnetoresistance appears within a quite wide range of magnetic field and persists up to room temperature . 
this remarkable lmr provides a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons.@xcite in contrast to quantum hall effect which appears in the case of well formed landau levels and to abrikosov s quantum magnetotransport,@xcite which is limited to the extreme quantum limit that all electrons coalesce into the lowest landau level , the discussed lmr is a phenomena of pure classical two - dimensional magnetotransport in a system having linear - energy - dispersion , appearing in the regime of overlapped landau levels , irrespective of its showing up in relatively high magnetic field range . furthermore , the present scheme deals with spatially uniform case without invoking the mobility fluctuation in a strongly inhomogeneous system , which is required in the classical parish and littlewood model to produce a lmr.@xcite the appearance of this significant positive - increasing linear magnetoresistance depends on the existence of a positive and sizable effective g - factor . if the zeeman energy splitting is quite small the resistivity @xmath157 would exhibit little change with changing magnetic field . in the case of a negative and sizable effective g - factor the magnetoresistivity would decrease linearly with increasing magnetic field . therefore , the behavior of the longitudinal resistivity versus magnetic field may provide a useful way for judging the direction and the size of the effective zeeman energy splitting in ti surface states . this work was supported by the national science foundation of china ( grant no . 11104002 ) , the national basic research program of china ( grant no . 2012cb927403 ) and by the program for science&technology innovation talents in universities of henan province ( grant no . 2012hastit029 ) ."""
inputs = tokenizer(
[ARTICLE_LEP, ARTICLE_MAGNET],
max_length=1024,
padding="max_length",
truncation=True,
return_tensors="pt",
)
inputs = {k: inputs[k].to(torch_device) for k in inputs}
hypotheses_batch = model.generate(**inputs)
EXPECTED_LEP = "motivated by some recent studies on the light cp - odd higgs boson @xmath0 in non - minimal supersymmetric models, we investigate the rare @xmath1-decays @xmath2 ( @xmath3 ) in the two higgs doublet model ( 2hdm ), the nearly minimal supersymmetric standard model ( nmssm ), the next - to - minimal supersymmetric standard model ( nmssm ) and the minimal supersymmetric standard model ( mssm ).<n> we find that the branching ratios of @xmath4 can reach @xmath5 in 2hdm, @xmath6 in nmssm and @xmath7 in mssm, which are at the level of @xmath8 in 2hdm, @xmath9 in nmssm and @xmath10 in mssm, respectively.<n> these rates can be significantly enhanced in new physics models which lie within the expected sensitivity of the gigaz option of the international linear collider ( ilc ). <n> = # 1,nucl. <n> phys. <n> b * # 1"
EXPECTED_MAGNET = "a positive, nonsaturating and dominantly linear magnetoresistance can appear within quite wide magnetic - field range in the surface state of a topological insulator having a positive and finite effective g - factor. this linear magnetoresistance shows up in the system of high carrier concentration and low mobility when electrons are in extended states and spread over many smeared landau levels, and persists up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons."
generated = tokenizer.batch_decode(
hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
)
self.assertTrue(generated == [EXPECTED_LEP, EXPECTED_MAGNET])
class BigBirdPegasusStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=7,
d_model=32,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=4,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
attention_type="original_full",
use_bias=True,
block_size=16,
num_random_blocks=3,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = BigBirdPegasusConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
attention_type=self.attention_type,
use_bias=self.use_bias,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = BigBirdPegasusDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
# big bird has extremely high logits which requires
# such a high error tolerance here
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=5e-1)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, lm_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class BigBirdPegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (BigBirdPegasusDecoder, BigBirdPegasusForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (BigBirdPegasusForCausalLM,) if is_torch_available() else ()
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = BigBirdPegasusStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
| 109,884 | 141.153946 | 43,043 | py |
robust-transformers | robust-transformers-main/utils/tests_fetcher.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import re
from contextlib import contextmanager
from pathlib import Path
from git import Repo
# This script is intended to be run from the root of the repo but you can adapt this constant if you need to.
PATH_TO_TRANFORMERS = "."
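# Illustrative invocations, a hedged sketch based only on the argparse options defined at the bottom of this
# file (the exact CI wiring may differ):
#   python utils/tests_fetcher.py                          # diff against master, write test_list.txt
#   python utils/tests_fetcher.py --diff_with_last_commit  # diff against the parent commit instead
#   python utils/tests_fetcher.py --sanity_check           # only verify the module <-> test file mapping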
@contextmanager
def checkout_commit(repo, commit_id):
"""
Context manager that checks out a commit in the repo.
"""
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
def clean_code(content):
"""
Remove docstrings, empty line or comments from `content`.
"""
# fmt: off
# Remove docstrings by splitting on triple " then triple ':
splits = content.split('\"\"\"')
content = "".join(splits[::2])
splits = content.split("\'\'\'")
# fmt: on
content = "".join(splits[::2])
# Remove empty lines and comments
lines_to_keep = []
for line in content.split("\n"):
# remove anything that is after a # sign.
line = re.sub("#.*$", "", line)
if len(line) == 0 or line.isspace():
continue
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def diff_is_docstring_only(repo, branching_point, filename):
"""
Check if the diff is only in docstrings in a filename.
"""
with checkout_commit(repo, branching_point):
with open(filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = clean_code(old_content)
new_content_clean = clean_code(new_content)
return old_content_clean == new_content_clean
def get_modified_python_files(diff_with_last_commit=False):
"""
Return a list of python files that have been modified between:
- the current head and the master branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
"""
repo = Repo(PATH_TO_TRANFORMERS)
if not diff_with_last_commit:
print(f"Master is at {repo.refs.master.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.master, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
return get_diff(repo, repo.head.commit, branching_commits)
else:
print(f"Master is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
return get_diff(repo, repo.head.commit, parent_commits)
def get_diff(repo, base_commit, commits):
"""
Gets the diff between one or several commits and the head of the repository.
"""
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
# We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
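# Note on the `change_type` codes used above, as reported by GitPython: "A" added, "D" deleted, "M" modified,
# "R" renamed.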
def get_module_dependencies(module_fname):
"""
Get the dependencies of a module.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, module_fname), "r", encoding="utf-8") as f:
content = f.read()
module_parts = module_fname.split(os.path.sep)
imported_modules = []
# Let's start with relative imports
relative_imports = re.findall(r"from\s+(\.+\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [mod for mod, imp in relative_imports if "# tests_ignore" not in imp]
for imp in relative_imports:
level = 0
while imp.startswith("."):
imp = imp[1:]
level += 1
if len(imp) > 0:
dep_parts = module_parts[: len(module_parts) - level] + imp.split(".")
else:
dep_parts = module_parts[: len(module_parts) - level] + ["__init__.py"]
imported_module = os.path.sep.join(dep_parts)
# We ignore the main init import as it is only there for the __version__ and
# it would add everything as a dependency.
if not imported_module.endswith("transformers/__init__.py"):
imported_modules.append(imported_module)
# Let's continue with direct imports
# Imports from the top-level transformers module are ignored for the same reason we ignored the
# main init before.
direct_imports = re.findall(r"from\s+transformers\.(\S+)\s+import\s+([^\n]+)\n", content)
direct_imports = [mod for mod, imp in direct_imports if "# tests_ignore" not in imp]
for imp in direct_imports:
import_parts = imp.split(".")
dep_parts = ["src", "transformers"] + import_parts
imported_modules.append(os.path.sep.join(dep_parts))
# Now let's just check that we have proper module files, or append an init for submodules
dependencies = []
for imported_module in imported_modules:
if os.path.isfile(os.path.join(PATH_TO_TRANFORMERS, f"{imported_module}.py")):
dependencies.append(f"{imported_module}.py")
elif os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, imported_module)) and os.path.isfile(
os.path.sep.join([PATH_TO_TRANFORMERS, imported_module, "__init__.py"])
):
dependencies.append(os.path.sep.join([imported_module, "__init__.py"]))
return dependencies
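# A hedged illustration of the resolution above (file names are only examples): inside
# "src/transformers/models/bert/modeling_bert.py", a relative import like "from ...activations import gelu"
# resolves to "src/transformers/activations.py", while a direct import like
# "from transformers.utils import logging" resolves to "src/transformers/utils/__init__.py" when `utils` is a
# package directory rather than a single module file.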
def get_test_dependencies(test_fname):
"""
Get the dependencies of a test file.
"""
with open(os.path.join(PATH_TO_TRANFORMERS, test_fname), "r", encoding="utf-8") as f:
content = f.read()
# Tests only have relative imports for other test files
# TODO Sylvain: handle relative imports cleanly
relative_imports = re.findall(r"from\s+(\.\S+)\s+import\s+([^\n]+)\n", content)
relative_imports = [test for test, imp in relative_imports if "# tests_ignore" not in imp]
# Removes the leading '..' from parent imports, and creates a path from the root dir with
# `tests` as a prefix.
parent_imports = [imp.strip(".") for imp in relative_imports if ".." in imp]
parent_imports = [os.path.join("tests", f"{test.replace('.', os.path.sep)}.py") for test in parent_imports]
# Removes the leading '.' from current dir imports, and creates a path from the root dir with
# tests/{module_name} as a prefix.
current_dir_imports = [imp.strip(".") for imp in relative_imports if ".." not in imp]
directory = os.path.sep.join(test_fname.split(os.path.sep)[:-1])
current_dir_imports = [
os.path.join(directory, f"{test.replace('.', os.path.sep)}.py") for test in current_dir_imports
]
return [f for f in [*parent_imports, *current_dir_imports] if os.path.isfile(f)]
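# For instance (a sketch with hypothetical imports): in "tests/bert/test_modeling_bert.py", a parent import
# like "from ..test_modeling_common import ModelTesterMixin" maps to "tests/test_modeling_common.py", while a
# current-dir import like "from .test_tokenization_bert import SomeTester" maps to
# "tests/bert/test_tokenization_bert.py"; either is kept only if the file actually exists.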
def create_reverse_dependency_map():
"""
Create the dependency map from module/test filename to the list of modules/tests that depend on it (even
recursively).
"""
modules = [
str(f.relative_to(PATH_TO_TRANFORMERS))
for f in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
# We grab all the dependencies of each module.
direct_deps = {m: get_module_dependencies(m) for m in modules}
# We add all the dependencies of each test file
tests = [str(f.relative_to(PATH_TO_TRANFORMERS)) for f in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/*.py")]
direct_deps.update({t: get_test_dependencies(t) for t in tests})
all_files = modules + tests
# This recursively expands the dependencies of each file
something_changed = True
while something_changed:
something_changed = False
for m in all_files:
for d in direct_deps[m]:
if d not in direct_deps:
raise ValueError(f"KeyError:{d}. From {m}")
for dep in direct_deps[d]:
if dep not in direct_deps[m]:
direct_deps[m].append(dep)
something_changed = True
# Finally we can build the reverse map.
reverse_map = collections.defaultdict(list)
for m in all_files:
if m.endswith("__init__.py"):
reverse_map[m].extend(direct_deps[m])
for d in direct_deps[m]:
reverse_map[d].append(m)
return reverse_map
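# A sketch of the resulting structure (keys and values are illustrative):
# reverse_map["src/transformers/activations.py"] would list every module and test file that imports it,
# directly or transitively, so a change to that file schedules all of them for testing.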
# Any module file whose test file name can't be inferred automatically from its name should go here. The better
# approach is to (re-)name the test file accordingly; the second best is to add the correspondence to this map.
SPECIAL_MODULE_TO_TEST_MAP = {
"commands/add_new_model_like.py": "utils/test_add_new_model_like.py",
"configuration_utils.py": "test_configuration_common.py",
"convert_graph_to_onnx.py": "onnx/test_onnx.py",
"data/data_collator.py": "trainer/test_data_collator.py",
"deepspeed.py": "deepspeed/",
"feature_extraction_sequence_utils.py": "test_sequence_feature_extraction_common.py",
"feature_extraction_utils.py": "test_feature_extraction_common.py",
"file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"],
"modelcard.py": "utils/test_model_card.py",
"modeling_flax_utils.py": "test_modeling_flax_common.py",
"modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"],
"modeling_utils.py": ["test_modeling_common.py", "utils/test_offline.py"],
"models/auto/modeling_auto.py": [
"auto/test_modeling_auto.py",
"auto/test_modeling_tf_pytorch.py",
"bort/test_modeling_bort.py",
],
"models/auto/modeling_flax_auto.py": "auto/test_modeling_flax_auto.py",
"models/auto/modeling_tf_auto.py": [
"auto/test_modeling_tf_auto.py",
"auto/test_modeling_tf_pytorch.py",
"bort/test_modeling_tf_bort.py",
],
"models/gpt2/modeling_gpt2.py": ["gpt2/test_modeling_gpt2.py", "megatron_gpt2/test_modeling_megatron_gpt2.py"],
"optimization.py": "optimization/test_optimization.py",
"optimization_tf.py": "optimization/test_optimization_tf.py",
"pipelines/base.py": "pipelines/test_pipelines_*.py",
"pipelines/text2text_generation.py": [
"pipelines/test_pipelines_text2text_generation.py",
"pipelines/test_pipelines_summarization.py",
"pipelines/test_pipelines_translation.py",
],
"pipelines/zero_shot_classification.py": "pipelines/test_pipelines_zero_shot.py",
"testing_utils.py": "utils/test_skip_decorators.py",
"tokenization_utils.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_base.py": ["test_tokenization_common.py", "tokenization/test_tokenization_utils.py"],
"tokenization_utils_fast.py": [
"test_tokenization_common.py",
"tokenization/test_tokenization_utils.py",
"tokenization/test_tokenization_fast.py",
],
"trainer.py": [
"trainer/test_trainer.py",
"extended/test_trainer_ext.py",
"trainer/test_trainer_distributed.py",
"trainer/test_trainer_tpu.py",
],
"train_pt_utils.py": "trainer/test_trainer_utils.py",
"utils/versions.py": "utils/test_versions_utils.py",
}
def module_to_test_file(module_fname):
"""
Returns the name of the file(s) where `module_fname` is tested.
"""
splits = module_fname.split(os.path.sep)
# Special map has priority
short_name = os.path.sep.join(splits[2:])
if short_name in SPECIAL_MODULE_TO_TEST_MAP:
test_file = SPECIAL_MODULE_TO_TEST_MAP[short_name]
if isinstance(test_file, str):
return f"tests/{test_file}"
return [f"tests/{f}" for f in test_file]
module_name = splits[-1]
# Fast tokenizers are tested in the same file as the slow ones.
if module_name.endswith("_fast.py"):
module_name = module_name.replace("_fast.py", ".py")
# Special case for pipelines submodules
if len(splits) >= 2 and splits[-2] == "pipelines":
default_test_file = f"tests/pipelines/test_pipelines_{module_name}"
# Special case for benchmarks submodules
elif len(splits) >= 2 and splits[-2] == "benchmark":
return ["tests/benchmark/test_benchmark.py", "tests/benchmark/test_benchmark_tf.py"]
# Special case for commands submodules
elif len(splits) >= 2 and splits[-2] == "commands":
return "tests/utils/test_cli.py"
# Special case for onnx submodules
elif len(splits) >= 2 and splits[-2] == "onnx":
return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"]
# Special case for utils (not the one in src/transformers, the ones at the root of the repo).
elif len(splits) > 0 and splits[0] == "utils":
default_test_file = f"tests/utils/test_utils_{module_name}"
elif len(splits) > 4 and splits[2] == "models":
default_test_file = f"tests/{splits[3]}/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("generation"):
default_test_file = f"tests/generation/test_{module_name}"
elif len(splits) > 2 and splits[2].startswith("trainer"):
default_test_file = f"tests/trainer/test_{module_name}"
else:
default_test_file = f"tests/utils/test_{module_name}"
if os.path.isfile(default_test_file):
return default_test_file
# Processing -> processor
if "processing" in default_test_file:
test_file = default_test_file.replace("processing", "processor")
if os.path.isfile(test_file):
return test_file
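# A hedged example of the default naming convention implemented above (a path is returned only if the file
# actually exists):
#   module_to_test_file("src/transformers/models/bert/modeling_bert.py")
#   # -> "tests/bert/test_modeling_bert.py"
#   module_to_test_file("src/transformers/pipelines/text_classification.py")
#   # -> "tests/pipelines/test_pipelines_text_classification.py"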
# This list contains the test files we expect never to be launched from a change in a module/util. Those are
# launched separately.
EXPECTED_TEST_FILES_NEVER_TOUCHED = [
"tests/utils/test_doc_samples.py", # Doc tests
"tests/pipelines/test_pipelines_common.py", # Actually checked by the pipeline based file
"tests/sagemaker/test_single_node_gpu.py", # SageMaker test
"tests/sagemaker/test_multi_node_model_parallel.py", # SageMaker test
"tests/sagemaker/test_multi_node_data_parallel.py", # SageMaker test
]
def _print_list(l):
return "\n".join([f"- {f}" for f in l])
def sanity_check():
"""
Checks that all test files can be touched by a modification in at least one module/utils. This test ensures that
newly-added test files are properly mapped to some module or utils, so they can be run by the CI.
"""
# Grab all module and utils
all_files = [
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / "src/transformers").glob("**/*.py")
]
all_files += [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "utils").glob("**/*.py")
]
# Compute all the test files we get from those.
test_files_found = []
for f in all_files:
test_f = module_to_test_file(f)
if test_f is not None:
if isinstance(test_f, str):
test_files_found.append(test_f)
else:
test_files_found.extend(test_f)
# Some of the test files might actually be subfolders so we grab the tests inside.
test_files = []
for test_f in test_files_found:
if os.path.isdir(os.path.join(PATH_TO_TRANFORMERS, test_f)):
test_files.extend(
[
str(p.relative_to(PATH_TO_TRANFORMERS))
for p in (Path(PATH_TO_TRANFORMERS) / test_f).glob("**/test*.py")
]
)
else:
test_files.append(test_f)
# Compare to existing test files
existing_test_files = [
str(p.relative_to(PATH_TO_TRANFORMERS)) for p in (Path(PATH_TO_TRANFORMERS) / "tests").glob("**/test*.py")
]
not_touched_test_files = [f for f in existing_test_files if f not in test_files]
should_be_tested = set(not_touched_test_files) - set(EXPECTED_TEST_FILES_NEVER_TOUCHED)
if len(should_be_tested) > 0:
raise ValueError(
"The following test files are not currently associated with any module or utils files, which means they "
f"will never get run by the CI:\n{_print_list(should_be_tested)}\n. Make sure the names of these test "
"files match the name of the module or utils they are testing, or adapt the constant "
"`SPECIAL_MODULE_TO_TEST_MAP` in `utils/tests_fetcher.py` to add them. If your test file is triggered "
"separately and is not supposed to be run by the regular CI, add it to the "
"`EXPECTED_TEST_FILES_NEVER_TOUCHED` constant instead."
)
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None):
modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
# Create the map that will give us all impacted modules.
impacted_modules_map = create_reverse_dependency_map()
impacted_files = modified_files.copy()
for f in modified_files:
if f in impacted_modules_map:
impacted_files.extend(impacted_modules_map[f])
# Remove duplicates
impacted_files = sorted(list(set(impacted_files)))
print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
# Grab the corresponding test files:
if "setup.py" in impacted_files:
test_files_to_run = ["tests"]
else:
# Grab the corresponding test files:
test_files_to_run = []
for f in impacted_files:
# Modified test files are always added
if f.startswith("tests/"):
test_files_to_run.append(f)
# Example files are tested separately
elif f.startswith("examples/pytorch"):
test_files_to_run.append("examples/pytorch/test_examples.py")
elif f.startswith("examples/flax"):
test_files_to_run.append("examples/flax/test_examples.py")
else:
new_tests = module_to_test_file(f)
if new_tests is not None:
if isinstance(new_tests, str):
test_files_to_run.append(new_tests)
else:
test_files_to_run.extend(new_tests)
# Remove duplicates
test_files_to_run = sorted(list(set(test_files_to_run)))
# Make sure we did not end up with a test file that was removed
test_files_to_run = [f for f in test_files_to_run if os.path.isfile(f) or os.path.isdir(f)]
if filters is not None:
filtered_files = []
for filter in filters:
filtered_files.extend([f for f in test_files_to_run if f.startswith(filter)])
test_files_to_run = filtered_files
print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
if len(test_files_to_run) > 0:
with open(output_file, "w", encoding="utf-8") as f:
f.write(" ".join(test_files_to_run))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--sanity_check", action="store_true", help="Only test that all tests and modules are accounted for."
)
parser.add_argument(
"--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
)
parser.add_argument(
"--diff_with_last_commit",
action="store_true",
help="To fetch the tests between the current commit and the last commit",
)
parser.add_argument(
"--filters",
type=str,
nargs="*",
default=["tests"],
help="Only keep the test files matching one of those filters.",
)
args = parser.parse_args()
if args.sanity_check:
sanity_check()
else:
repo = Repo(PATH_TO_TRANFORMERS)
diff_with_last_commit = args.diff_with_last_commit
if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.master:
print("Master branch detected, fetching tests against last commit.")
diff_with_last_commit = True
try:
infer_tests_to_run(args.output_file, diff_with_last_commit=diff_with_last_commit, filters=args.filters)
except Exception as e:
print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.")
with open(args.output_file, "w", encoding="utf-8") as f:
if args.filters is None:
f.write("./tests/")
else:
f.write(" ".join(args.filters))
| 22,177 | 40.68797 | 117 | py |
robust-transformers | robust-transformers-main/utils/check_copies.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import re
import black
from style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source"
REPO_PATH = "."
# Mapping for files that are full copies of others (keys are copies, values the file to keep them up to date with)
FULL_COPIES = {
"examples/tensorflow/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py",
"examples/flax/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py",
}
LOCALIZED_READMES = {
# If the introduction or the conclusion of the list change, the prompts may need to be updated.
"README.md": {
"start_prompt": "🤗 Transformers currently provides the following architectures",
"end_prompt": "1. Want to contribute a new model?",
"format_model_list": "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by {paper_authors}.{supplements}",
},
"README_zh-hans.md": {
"start_prompt": "🤗 Transformers 目前支持如下的架构",
"end_prompt": "1. 想要贡献新的模型?",
"format_model_list": "**[{title}]({model_link})** (来自 {paper_affiliations}) 伴随论文 {paper_title_link} 由 {paper_authors} 发布。{supplements}",
},
"README_zh-hant.md": {
"start_prompt": "🤗 Transformers 目前支援以下的架構",
"end_prompt": "1. 想要貢獻新的模型?",
"format_model_list": "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by {paper_authors}.{supplements}",
},
"README_ko.md": {
"start_prompt": "🤗 Transformers는 다음 모델들을 제공합니다",
"end_prompt": "1. 새로운 모델을 올리고 싶나요?",
"format_model_list": "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by {paper_authors}.{supplements}",
},
}
def _should_continue(line, indent):
return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
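# Note (added for clarity, not part of the original file): `_should_continue` returns True while we are
# still inside the body of the object being extracted, i.e. the line keeps at least the expected
# indentation, is empty, or is the closing parenthesis of a multi-line signature such as ") -> SomeType:".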
def find_code_in_transformers(object_name):
"""Find and return the code source code of `object_name`."""
parts = object_name.split(".")
i = 0
# First let's find the module where our object lives.
module = parts[i]
while i < len(parts) and not os.path.isfile(os.path.join(TRANSFORMERS_PATH, f"{module}.py")):
i += 1
if i < len(parts):
module = os.path.join(module, parts[i])
if i >= len(parts):
raise ValueError(
f"`object_name` should begin with the name of a module of transformers but got {object_name}."
)
with open(os.path.join(TRANSFORMERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Now let's find the class / func in the code!
indent = ""
line_index = 0
for name in parts[i + 1 :]:
while (
line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lines):
raise ValueError(f" {object_name} does not match any function or class in {module}.")
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
start_index = line_index
while line_index < len(lines) and _should_continue(lines[line_index], indent):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
code_lines = lines[start_index:line_index]
return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+transformers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
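# Illustrative example (added comment, not part of the original file): a line such as
# "# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Roberta"
# is matched by `_re_copy_warning` with groups ("", "models.bert.modeling_bert.BertSelfOutput",
# "with Bert->Roberta"); `_re_replace_pattern` then pulls ("Bert", "Roberta", "") out of the
# replacement directive so the original code can be adapted before comparison.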
def get_indent(code):
lines = code.split("\n")
idx = 0
while idx < len(lines) and len(lines[idx]) == 0:
idx += 1
if idx < len(lines):
return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
return ""
def blackify(code):
"""
Applies the black part of our `make style` command to `code`.
"""
has_indent = len(get_indent(code)) > 0
if has_indent:
code = f"class Bla:\n{code}"
mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
result = black.format_str(code, mode=mode)
result, _ = style_docstrings_in_code(result)
return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
"""
Check if the code commented as a copy in `filename` matches the original.
Return the differences or overwrites the content depending on `overwrite`.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
diffs = []
line_index = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lines):
search = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
indent, object_name, replace_pattern = search.groups()
theoretical_code = find_code_in_transformers(object_name)
theoretical_indent = get_indent(theoretical_code)
start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
indent = theoretical_indent
line_index = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
should_continue = True
while line_index < len(lines) and should_continue:
line_index += 1
if line_index >= len(lines):
break
line = lines[line_index]
should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
observed_code_lines = lines[start_index:line_index]
observed_code = "".join(observed_code_lines)
# Before comparing, use the `replace_pattern` on the original code.
if len(replace_pattern) > 0:
patterns = replace_pattern.replace("with", "").split(",")
patterns = [_re_replace_pattern.search(p) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
obj1, obj2, option = pattern.groups()
theoretical_code = re.sub(obj1, obj2, theoretical_code)
if option.strip() == "all-casing":
theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
line_index = start_index + 1
if overwrite and len(diffs) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}.")
with open(filename, "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
return diffs
def check_copies(overwrite: bool = False):
all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, "**/*.py"), recursive=True)
diffs = []
for filename in all_files:
new_diffs = is_copy_consistent(filename, overwrite)
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(diffs) > 0:
diff = "\n".join(diffs)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
)
check_model_list_copy(overwrite=overwrite)
def check_full_copies(overwrite: bool = False):
diffs = []
for target, source in FULL_COPIES.items():
with open(source, "r", encoding="utf-8") as f:
source_code = f.read()
with open(target, "r", encoding="utf-8") as f:
target_code = f.read()
if source_code != target_code:
if overwrite:
with open(target, "w", encoding="utf-8") as f:
print(f"Replacing the content of {target} by the one of {source}.")
f.write(source_code)
else:
diffs.append(f"- {target}: copy does not match {source}.")
if not overwrite and len(diffs) > 0:
diff = "\n".join(diffs)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
)
def get_model_list(filename, start_prompt, end_prompt):
"""Extracts the model list from the README."""
with open(os.path.join(REPO_PATH, filename), "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start of the list.
start_index = 0
while not lines[start_index].startswith(start_prompt):
start_index += 1
start_index += 1
result = []
current_line = ""
end_index = start_index
while not lines[end_index].startswith(end_prompt):
if lines[end_index].startswith("1."):
if len(current_line) > 1:
result.append(current_line)
current_line = lines[end_index]
elif len(lines[end_index]) > 1:
current_line = f"{current_line[:-1]} {lines[end_index].lstrip()}"
end_index += 1
if len(current_line) > 1:
result.append(current_line)
return "".join(result)
def convert_to_localized_md(model_list, localized_model_list, format_str):
"""Convert `model_list` to each localized README."""
def _rep(match):
title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()
return format_str.format(
title=title,
model_link=model_link,
paper_affiliations=paper_affiliations,
paper_title_link=paper_title_link,
paper_authors=paper_authors,
supplements=" " + supplements.strip() if len(supplements) != 0 else "",
)
# This regex captures metadata from an English model description, including model title, model link,
# affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example).
_re_capture_meta = re.compile(
r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"
)
    # This regex is used to synchronize titles and links across the READMEs.
_re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*")
num_models_equal = True
if len(localized_model_list) == 0:
localized_model_index = {}
else:
try:
localized_model_index = {
re.search(r"\*\*\[([^\]]*)", line).groups()[0]: line
for line in localized_model_list.strip().split("\n")
}
except AttributeError:
raise AttributeError("A model name in localized READMEs cannot be recognized.")
for model in model_list.strip().split("\n"):
title, model_link = _re_capture_title_link.search(model).groups()
if title not in localized_model_index:
num_models_equal = False
# Add an anchor white space behind a model description string for regex.
# If metadata cannot be captured, the English version will be directly copied.
localized_model_index[title] = _re_capture_meta.sub(_rep, model + " ")
else:
# Synchronize link
localized_model_index[title] = _re_capture_title_link.sub(
f"**[{title}]({model_link})**", localized_model_index[title], count=1
)
sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())
return num_models_equal, "\n".join(map(lambda x: x[1], sorted_index)) + "\n"
def convert_readme_to_index(model_list):
model_list = model_list.replace("https://huggingface.co/docs/transformers/master/", "")
return model_list.replace("https://huggingface.co/docs/transformers/", "")
def _find_text_in_file(filename, start_prompt, end_prompt):
"""
Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
lines.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start prompt.
start_index = 0
while not lines[start_index].startswith(start_prompt):
start_index += 1
start_index += 1
end_index = start_index
while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
def check_model_list_copy(overwrite=False, max_per_line=119):
"""Check the model lists in the README and index.rst are consistent and maybe `overwrite`."""
# If the introduction or the conclusion of the list change, the prompts may need to be updated.
index_list, start_index, end_index, lines = _find_text_in_file(
filename=os.path.join(PATH_TO_DOCS, "index.mdx"),
start_prompt="<!--This list is updated automatically from the README",
end_prompt="### Supported frameworks",
)
md_list = get_model_list(
filename="README.md",
start_prompt=LOCALIZED_READMES["README.md"]["start_prompt"],
end_prompt=LOCALIZED_READMES["README.md"]["end_prompt"],
)
converted_md_lists = []
for filename, value in LOCALIZED_READMES.items():
_start_prompt = value["start_prompt"]
_end_prompt = value["end_prompt"]
_format_model_list = value["format_model_list"]
localized_md_list = get_model_list(filename, _start_prompt, _end_prompt)
num_models_equal, converted_md_list = convert_to_localized_md(md_list, localized_md_list, _format_model_list)
converted_md_lists.append((filename, num_models_equal, converted_md_list, _start_prompt, _end_prompt))
converted_md_list = convert_readme_to_index(md_list)
if converted_md_list != index_list:
if overwrite:
with open(os.path.join(PATH_TO_DOCS, "index.mdx"), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [converted_md_list] + lines[end_index:])
else:
raise ValueError(
"The model list in the README changed and the list in `index.mdx` has not been updated. Run "
"`make fix-copies` to fix this."
)
for converted_md_list in converted_md_lists:
filename, num_models_equal, converted_md, _start_prompt, _end_prompt = converted_md_list
if filename == "README.md":
continue
if overwrite:
_, start_index, end_index, lines = _find_text_in_file(
filename=os.path.join(REPO_PATH, filename), start_prompt=_start_prompt, end_prompt=_end_prompt
)
with open(os.path.join(REPO_PATH, filename), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [converted_md] + lines[end_index:])
elif not num_models_equal:
raise ValueError(
f"The model list in the README changed and the list in `{filename}` has not been updated. Run "
"`make fix-copies` to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_copies(args.fix_and_overwrite)
check_full_copies(args.fix_and_overwrite)
| 17,421 | 39.800937 | 163 | py |
robust-transformers | robust-transformers-main/utils/check_inits.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
def find_backend(line):
"""Find one (or multiple) backend in a code line of the init."""
if _re_test_backend.search(line) is None:
return None
backends = [b[0] for b in _re_backend.findall(line)]
backends.sort()
return "_and_".join(backends)
def parse_init(init_file):
"""
Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
defined
"""
with open(init_file, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
line_index = 0
while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lines):
return None
# First grab the objects without a specific backend in _import_structure
objects = []
while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
line = lines[line_index]
single_line_import_search = _re_import_struct_key_value.search(line)
if single_line_import_search is not None:
imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
objects.extend(imports)
elif line.startswith(" " * 8 + '"'):
objects.append(line[9:-3])
line_index += 1
import_dict_objects = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING"):
# If the line is an if is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index])
if backend is not None:
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
line = lines[line_index]
if _re_import_struct_add_one.search(line) is not None:
objects.append(_re_import_struct_add_one.search(line).groups()[0])
elif _re_import_struct_add_many.search(line) is not None:
imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
imports = [obj[1:-1] for obj in imports if len(obj) > 0]
objects.extend(imports)
elif _re_between_brackets.search(line) is not None:
imports = _re_between_brackets.search(line).groups()[0].split(", ")
imports = [obj[1:-1] for obj in imports if len(obj) > 0]
objects.extend(imports)
elif _re_quote_object.search(line) is not None:
objects.append(_re_quote_object.search(line).groups()[0])
elif line.startswith(" " * 8 + '"'):
objects.append(line[9:-3])
elif line.startswith(" " * 12 + '"'):
objects.append(line[13:-3])
line_index += 1
import_dict_objects[backend] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
objects = []
while (
line_index < len(lines)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("else")
):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 8):
objects.append(line[8:-2])
line_index += 1
type_hint_objects = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index])
if backend is not None:
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 12):
objects.append(line[12:-2])
line_index += 1
type_hint_objects[backend] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
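# Illustrative return value (added comment, not part of the original file): for a typical model init this
# returns two dicts of the form {"none": [...], "torch": [...], "tf": [...]}, the first built from
# `_import_structure` and the second from the `TYPE_CHECKING` block, which `analyze_results` then
# compares key by key.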
def analyze_results(import_dict_objects, type_hint_objects):
"""
Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
"""
def find_duplicates(seq):
return [k for k, v in collections.Counter(seq).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
duplicate_type_hints = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
name = "base imports" if key == "none" else f"{key} backend"
errors.append(f"Differences for {name}:")
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
return errors
def check_all_inits():
"""
Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
both halves.
"""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
if "__init__.py" in files:
fname = os.path.join(root, "__init__.py")
objects = parse_init(fname)
if objects is not None:
errors = analyze_results(*objects)
if len(errors) > 0:
errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(errors))
if len(failures) > 0:
raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
"""
Returns the list of Transformers submodules.
"""
submodules = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
for folder in directories:
# Ignore private modules
if folder.startswith("_"):
directories.remove(folder)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path) / folder).glob("*.py"))) == 0:
continue
short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
submodule = short_path.replace(os.path.sep, ".")
submodules.append(submodule)
for fname in files:
if fname == "__init__.py":
continue
short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
submodule = short_path.replace(os.path.sep, ".").replace(".py", "")
if len(submodule.split(".")) == 1:
submodules.append(submodule)
return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(module_not_registered) > 0:
list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
f"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
)
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 11,088 | 40.68797 | 115 | py |
robust-transformers | robust-transformers-main/utils/notification_service.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import functools
import json
import math
import operator
import os
import re
import sys
import time
from typing import Dict, List, Optional, Union
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
NON_MODEL_TEST_MODULES = [
"benchmark",
"deepspeed",
"extended",
"fixtures",
"generation",
"onnx",
"optimization",
"pipelines",
"sagemaker",
"trainer",
"utils",
]
def handle_test_results(test_results):
expressions = test_results.split(" ")
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions):
if "failed" in expression:
failed += int(expressions[i - 1])
if "passed" in expression:
success += int(expressions[i - 1])
return failed, success, time_spent
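# Illustrative example (added comment, not part of the original file): a stats line such as
# "= 5 failed, 120 passed in 1:02:03 =" is parsed into failed=5, success=120 and time_spent="1:02:03"
# by the word-by-word scan above.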
def handle_stacktraces(test_results):
# These files should follow the following architecture:
# === FAILURES ===
# <path>:<line>: Error ...
# <path>:<line>: Error ...
# <empty line>
total_stacktraces = test_results.split("\n")[1:-1]
stacktraces = []
for stacktrace in total_stacktraces:
try:
line = stacktrace[: stacktrace.index(" ")].split(":")[-2]
error_message = stacktrace[stacktrace.index(" ") :]
stacktraces.append(f"(line {line}) {error_message}")
except Exception:
stacktraces.append("Cannot retrieve error message.")
return stacktraces
def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]):
if isinstance(objects, dict):
lists = objects.values()
else:
lists = objects
# Convert each dictionary to counter
counters = map(collections.Counter, lists)
# Sum all the counters
return functools.reduce(operator.add, counters)
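# Illustrative example (added comment, not part of the original file):
# dicts_to_sum([{"single": 1, "multi": 0}, {"single": 2, "multi": 3}]) returns
# Counter({"single": 3, "multi": 3}), i.e. a key-wise sum of the failure counts.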
class Message:
def __init__(self, title: str, model_results: Dict, additional_results: Dict):
self.title = title
# Failures and success of the modeling tests
self.n_model_success = sum(r["success"] for r in model_results.values())
self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values())
self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values())
# Some suites do not have a distinction between single and multi GPU.
self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
self.n_model_failures = (
self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
)
# Failures and success of the additional tests
self.n_additional_success = sum(r["success"] for r in additional_results.values())
all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
self.n_additional_single_gpu_failures = all_additional_failures["single"]
self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
self.n_additional_failures = (
self.n_additional_single_gpu_failures
+ self.n_additional_multi_gpu_failures
+ self.n_additional_unknown_gpu_failures
)
# Results
self.n_failures = self.n_model_failures + self.n_additional_failures
self.n_success = self.n_model_success + self.n_additional_success
self.n_tests = self.n_failures + self.n_success
self.model_results = model_results
self.additional_results = additional_results
self.thread_ts = None
@property
def time(self) -> str:
all_results = [*self.model_results.values(), *self.additional_results.values()]
time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])]
total_secs = 0
for time in time_spent:
time_parts = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts) == 1:
time_parts = [0, 0, time_parts[0]]
hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@staticmethod
def get_device_report(report, rjust=6):
if "single" in report and "multi" in report:
return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
elif "single" in report:
return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
elif "multi" in report:
return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
@property
def category_failures(self) -> Dict:
model_failures = [v["failed"] for v in self.model_results.values()]
category_failures = {}
for model_failure in model_failures:
for key, value in model_failure.items():
if key not in category_failures:
category_failures[key] = dict(value)
else:
category_failures[key]["unclassified"] += value["unclassified"]
category_failures[key]["single"] += value["single"]
category_failures[key]["multi"] += value["multi"]
individual_reports = []
for key, value in category_failures.items():
device_report = self.get_device_report(value)
if sum(value.values()):
if device_report:
individual_reports.append(f"{device_report}{key}")
else:
individual_reports.append(key)
header = "Single | Multi | Category\n"
category_failures_report = header + "\n".join(sorted(individual_reports))
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following modeling categories had failures:\n\n```\n{category_failures_report}\n```",
},
}
@property
def model_failures(self) -> Dict:
# Obtain per-model failures
def per_model_sum(model_category_dict):
return dicts_to_sum(model_category_dict["failed"].values())
failures = {}
non_model_failures = {
k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
}
for k, v in self.model_results.items():
            if k in NON_MODEL_TEST_MODULES:
                # Non-model modules are reported separately below, so skip them here.
                continue
if sum(per_model_sum(v).values()):
dict_failed = dict(v["failed"])
pytorch_specific_failures = dict_failed.pop("PyTorch")
tensorflow_specific_failures = dict_failed.pop("TensorFlow")
other_failures = dicts_to_sum(dict_failed.values())
failures[k] = {
"PyTorch": pytorch_specific_failures,
"TensorFlow": tensorflow_specific_failures,
"other": other_failures,
}
model_reports = []
other_module_reports = []
for key, value in non_model_failures.items():
if key in NON_MODEL_TEST_MODULES:
device_report = self.get_device_report(value)
if sum(value.values()):
if device_report:
report = f"{device_report}{key}"
else:
report = key
other_module_reports.append(report)
for key, value in failures.items():
device_report_values = [
value["PyTorch"]["single"],
value["PyTorch"]["multi"],
value["TensorFlow"]["single"],
value["TensorFlow"]["multi"],
sum(value["other"].values()),
]
if sum(device_report_values):
device_report = " | ".join([str(x).rjust(9) for x in device_report_values]) + " | "
report = f"{device_report}{key}"
model_reports.append(report)
model_header = "Single PT | Multi PT | Single TF | Multi TF | Other | Category\n"
sorted_model_reports = sorted(model_reports, key=lambda s: s.split("] ")[-1])
model_failures_report = model_header + "\n".join(sorted_model_reports)
module_header = "Single | Multi | Category\n"
sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("] ")[-1])
module_failures_report = module_header + "\n".join(sorted_module_reports)
report = ""
if len(model_reports):
report += f"These following model modules had failures:\n```\n{model_failures_report}\n```\n\n"
if len(other_module_reports):
report += f"The following non-model modules had failures:\n```\n{module_failures_report}\n```\n\n"
return {"type": "section", "text": {"type": "mrkdwn", "text": report}}
@property
def additional_failures(self) -> Dict:
failures = {k: v["failed"] for k, v in self.additional_results.items()}
errors = {k: v["error"] for k, v in self.additional_results.items()}
individual_reports = []
for key, value in failures.items():
device_report = self.get_device_report(value)
if sum(value.values()) or errors[key]:
report = f"{key}"
if errors[key]:
report = f"[Errored out] {report}"
if device_report:
report = f"{device_report}{report}"
individual_reports.append(report)
header = "Single | Multi | Category\n"
failures_report = header + "\n".join(sorted(individual_reports))
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following non-modeling tests had failures:\n```\n{failures_report}\n```",
},
}
@property
def payload(self) -> str:
blocks = [self.header]
if self.n_model_failures > 0 or self.n_additional_failures > 0:
blocks.append(self.failures)
if self.n_model_failures > 0:
blocks.extend([self.category_failures, self.model_failures])
if self.n_additional_failures > 0:
blocks.append(self.additional_failures)
if self.n_model_failures == 0 and self.n_additional_failures == 0:
blocks.append(self.no_failures)
return json.dumps(blocks)
@staticmethod
def error_out():
payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(payload)}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
text="There was an issue running the tests.",
blocks=payload,
)
def post(self):
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(self.payload)}))
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
blocks=self.payload,
text=text,
)
def get_reply_blocks(self, job_name, job_result, failures, device, text):
if len(failures) > 2500:
failures = "\n".join(failures.split("\n")[:20]) + "\n\n[Truncated]"
title = job_name
if device is not None:
title += f" ({device}-gpu)"
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_result["job_link"] is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_result["job_link"],
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures}},
]
def post_reply(self):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result["failures"]):
for device, failures in job_result["failures"].items():
text = "\n".join(
sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]])
)
blocks = self.get_reply_blocks(job, job_result, failures, device, text=text)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
text=f"Results for {job}",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)
time.sleep(1)
for job, job_result in self.additional_results.items():
if len(job_result["failures"]):
for device, failures in job_result["failures"].items():
blocks = self.get_reply_blocks(
job,
job_result,
failures,
device,
text=f"Number of failures: {sum(job_result['failed'].values())}",
)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
text=f"Results for {job}",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)
time.sleep(1)
def get_job_links():
run_id = os.environ["GITHUB_RUN_ID"]
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
result = requests.get(url).json()
jobs = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}").json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
return jobs
except Exception as e:
print("Unknown error, could not fetch links.", e)
return {}
def retrieve_artifact(name: str, gpu: Optional[str]):
if gpu not in [None, "single", "multi"]:
raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.")
if gpu is not None:
name = f"{gpu}-gpu-docker_{name}"
_artifact = {}
if os.path.exists(name):
files = os.listdir(name)
for file in files:
try:
with open(os.path.join(name, file)) as f:
_artifact[file.split(".")[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
return _artifact
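# Illustrative example (added comment, not part of the original file): retrieve_artifact("run_examples_gpu", "single")
# reads the local directory "single-gpu-docker_run_examples_gpu" and returns a dict keyed by file name
# without extension, e.g. {"stats": ..., "summary_short": ..., "failures_line": ...}.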
def retrieve_available_artifacts():
class Artifact:
def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
self.name = name
self.single_gpu = single_gpu
self.multi_gpu = multi_gpu
self.paths = []
def __str__(self):
return self.name
def add_path(self, path: str, gpu: str = None):
self.paths.append({"name": self.name, "path": path, "gpu": gpu})
_available_artifacts: Dict[str, Artifact] = {}
directories = filter(os.path.isdir, os.listdir())
for directory in directories:
if directory.startswith("single-gpu-docker"):
artifact_name = directory[len("single-gpu-docker") + 1 :]
if artifact_name in _available_artifacts:
_available_artifacts[artifact_name].single_gpu = True
else:
_available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)
_available_artifacts[artifact_name].add_path(directory, gpu="single")
elif directory.startswith("multi-gpu-docker"):
artifact_name = directory[len("multi-gpu-docker") + 1 :]
if artifact_name in _available_artifacts:
_available_artifacts[artifact_name].multi_gpu = True
else:
_available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)
_available_artifacts[artifact_name].add_path(directory, gpu="multi")
else:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name)
_available_artifacts[artifact_name].add_path(directory)
return _available_artifacts
if __name__ == "__main__":
arguments = sys.argv[1:][0]
try:
models = ast.literal_eval(arguments)
except SyntaxError:
Message.error_out()
raise ValueError("Errored out.")
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
modeling_categories = [
"PyTorch",
"TensorFlow",
"Flax",
"Tokenizers",
"Pipelines",
"Trainer",
"ONNX",
"Auto",
"Unclassified",
]
# This dict will contain all the information relative to each model:
# - Failures: the total, as well as the number of failures per-category defined above
# - Success: total
# - Time spent: as a comma-separated list of elapsed time
# - Failures: as a line-break separated list of errors
model_results = {
model: {
"failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in modeling_categories},
"success": 0,
"time_spent": "",
"failures": {},
}
for model in models
if f"run_all_tests_gpu_{model}_test_reports" in available_artifacts
}
unclassified_model_failures = []
for model in model_results.keys():
for artifact_path in available_artifacts[f"run_all_tests_gpu_{model}_test_reports"].paths:
artifact = retrieve_artifact(artifact_path["name"], artifact_path["gpu"])
if "stats" in artifact:
# Link to the GitHub Action job
model_results[model]["job_link"] = github_actions_job_links.get(
f"Model tests ({model}, {artifact_path['gpu']}-gpu-docker)"
)
failed, success, time_spent = handle_test_results(artifact["stats"])
model_results[model]["success"] += success
model_results[model]["time_spent"] += time_spent[1:-1] + ", "
stacktraces = handle_stacktraces(artifact["failures_line"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
line = line.replace("FAILED ", "")
line = line.split()[0].replace("\n", "")
if artifact_path["gpu"] not in model_results[model]["failures"]:
model_results[model]["failures"][artifact_path["gpu"]] = ""
model_results[model]["failures"][
artifact_path["gpu"]
] += f"*{line}*\n_{stacktraces.pop(0)}_\n\n"
if re.search("_tf_", line):
model_results[model]["failed"]["TensorFlow"][artifact_path["gpu"]] += 1
elif re.search("_flax_", line):
model_results[model]["failed"]["Flax"][artifact_path["gpu"]] += 1
elif re.search("test_modeling", line):
model_results[model]["failed"]["PyTorch"][artifact_path["gpu"]] += 1
elif re.search("test_tokenization", line):
model_results[model]["failed"]["Tokenizers"][artifact_path["gpu"]] += 1
elif re.search("test_pipelines", line):
model_results[model]["failed"]["Pipelines"][artifact_path["gpu"]] += 1
elif re.search("test_trainer", line):
model_results[model]["failed"]["Trainer"][artifact_path["gpu"]] += 1
elif re.search("onnx", line):
model_results[model]["failed"]["ONNX"][artifact_path["gpu"]] += 1
elif re.search("auto", line):
model_results[model]["failed"]["Auto"][artifact_path["gpu"]] += 1
else:
model_results[model]["failed"]["Unclassified"][artifact_path["gpu"]] += 1
unclassified_model_failures.append(line)
# Additional runs
additional_files = {
"Examples directory": "run_examples_gpu",
"PyTorch pipelines": "run_tests_torch_pipeline_gpu",
"TensorFlow pipelines": "run_tests_tf_pipeline_gpu",
"Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports",
}
additional_results = {
key: {
"failed": {"unclassified": 0, "single": 0, "multi": 0},
"success": 0,
"time_spent": "",
"error": False,
"failures": {},
"job_link": github_actions_job_links.get(key),
}
for key in additional_files.keys()
}
for key in additional_results.keys():
        # If a whole suite of tests fails, the artifact isn't available.
if additional_files[key] not in available_artifacts:
additional_results[key]["error"] = True
continue
for artifact_path in available_artifacts[additional_files[key]].paths:
if artifact_path["gpu"] is not None:
additional_results[key]["job_link"] = github_actions_job_links.get(
f"{key} ({artifact_path['gpu']}-gpu-docker)"
)
artifact = retrieve_artifact(artifact_path["name"], artifact_path["gpu"])
stacktraces = handle_stacktraces(artifact["failures_line"])
failed, success, time_spent = handle_test_results(artifact["stats"])
additional_results[key]["failed"][artifact_path["gpu"] or "unclassified"] += failed
additional_results[key]["success"] += success
additional_results[key]["time_spent"] += time_spent[1:-1] + ", "
if len(artifact["errors"]):
additional_results[key]["error"] = True
if failed:
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
line = line.replace("FAILED ", "")
line = line.split()[0].replace("\n", "")
if artifact_path["gpu"] not in additional_results[key]["failures"]:
additional_results[key]["failures"][artifact_path["gpu"]] = ""
additional_results[key]["failures"][
artifact_path["gpu"]
] += f"*{line}*\n_{stacktraces.pop(0)}_\n\n"
message = Message("🤗 Results of the scheduled tests.", model_results, additional_results)
message.post()
message.post_reply()
| 27,052 | 36.730823 | 126 | py |
robust-transformers | robust-transformers-main/utils/check_repo.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
import warnings
from collections import OrderedDict
from difflib import get_close_matches
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.file_utils import ENV_VARS_TRUE_VALUES
from transformers.models.auto import get_values
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source"
# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
"DPRSpanPredictor",
"RealmBertModel",
"T5Stack",
"TFDPRSpanPredictor",
]
# Update this list for models that are not tested, with a comment explaining the reason they should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"SegformerDecodeHead", # Building part of bigger (tested) model.
"PLBartEncoder", # Building part of bigger (tested) model.
"PLBartDecoder", # Building part of bigger (tested) model.
"PLBartDecoderWrapper", # Building part of bigger (tested) model.
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"RealmBertModel", # Building part of bigger (tested) model.
"RealmReader", # Not regular model.
"RealmScorer", # Not regular model.
"RealmForOpenQA", # Not regular model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"Speech2Text2DecoderWrapper", # Building part of bigger (tested) model.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"TrOCRDecoderWrapper", # Building part of bigger (tested) model.
"SeparableConv1D", # Building part of bigger (tested) model.
"FlaxBartForCausalLM", # Building part of bigger (tested) model.
]
# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't
# trigger the common tests.
TEST_FILES_WITH_NO_COMMON_TESTS = [
"camembert/test_modeling_camembert.py",
"mt5/test_modeling_flax_mt5.py",
"mbart/test_modeling_mbart.py",
"mt5/test_modeling_mt5.py",
"pegasus/test_modeling_pegasus.py",
"camembert/test_modeling_tf_camembert.py",
"mt5/test_modeling_tf_mt5.py",
"xlm_roberta/test_modeling_tf_xlm_roberta.py",
"xlm_roberta/test_modeling_flax_xlm_roberta.py",
"xlm_prophetnet/test_modeling_xlm_prophetnet.py",
"xlm_roberta/test_modeling_xlm_roberta.py",
"vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py",
"vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"ViltForQuestionAnswering",
"ViltForImagesAndTextClassification",
"ViltForImageAndTextRetrieval",
"ViltForMaskedLM",
"XGLMEncoder",
"XGLMDecoder",
"XGLMDecoderWrapper",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"SegformerDecodeHead",
"FlaxBeitForMaskedImageModeling",
"PLBartEncoder",
"PLBartDecoder",
"PLBartDecoderWrapper",
"BeitForMaskedImageModeling",
"CLIPTextModel",
"CLIPVisionModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"GPT2DoubleHeadsModel",
"LukeForMaskedLM",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"OpenAIGPTDoubleHeadsModel",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"RealmEmbedder",
"RealmForOpenQA",
"RealmScorer",
"RealmReader",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"SEWForCTC",
"SEWDForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
"MaskFormerForInstanceSegmentation",
]
# Update this list for models that have multiple model types for the same
# model doc
MODEL_TYPE_TO_DOC_MAPPING = OrderedDict(
[
("data2vec-text", "data2vec"),
("data2vec-audio", "data2vec"),
]
)
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
def check_model_list():
"""Check the model list inside the transformers library."""
# Get the models from the directory structure of `src/transformers/models/`
models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models")
_models = []
for model in os.listdir(models_dir):
model_dir = os.path.join(models_dir, model)
if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir):
_models.append(model)
    # Get the models registered in the `transformers.models` module.
models = [model for model in dir(transformers.models) if not model.startswith("__")]
missing_models = sorted(list(set(_models).difference(models)))
if missing_models:
raise Exception(
f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}."
)
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
"""Get the model modules inside the transformers library."""
_ignore_modules = [
"modeling_auto",
"modeling_encoder_decoder",
"modeling_marian",
"modeling_mmbt",
"modeling_outputs",
"modeling_retribert",
"modeling_utils",
"modeling_flax_auto",
"modeling_flax_encoder_decoder",
"modeling_flax_utils",
"modeling_speech_encoder_decoder",
"modeling_flax_speech_encoder_decoder",
"modeling_flax_vision_encoder_decoder",
"modeling_transfo_xl_utilities",
"modeling_tf_auto",
"modeling_tf_encoder_decoder",
"modeling_tf_outputs",
"modeling_tf_pytorch_utils",
"modeling_tf_utils",
"modeling_tf_transfo_xl_utilities",
"modeling_tf_vision_encoder_decoder",
"modeling_vision_encoder_decoder",
]
modules = []
for model in dir(transformers.models):
# There are some magic dunder attributes in the dir, we ignore them
if not model.startswith("__"):
model_module = getattr(transformers.models, model)
for submodule in dir(model_module):
if submodule.startswith("modeling") and submodule not in _ignore_modules:
modeling_module = getattr(model_module, submodule)
if inspect.ismodule(modeling_module):
modules.append(modeling_module)
return modules
def get_models(module, include_pretrained=False):
"""Get the objects in module that are models."""
models = []
model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
for attr_name in dir(module):
if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
continue
attr = getattr(module, attr_name)
if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
models.append((attr_name, attr))
return models
def is_a_private_model(model):
"""Returns True if the model should not be in the main init."""
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
def check_models_are_in_init():
"""Checks all models defined in the library are in the main init."""
models_not_in_init = []
dir_transformers = dir(transformers)
for module in get_model_modules():
models_not_in_init += [
model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
]
# Remove private models
models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
if len(models_not_in_init) > 0:
raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
"""Get the model test files."""
_ignore_files = [
"test_modeling_common",
"test_modeling_encoder_decoder",
"test_modeling_flax_encoder_decoder",
"test_modeling_flax_speech_encoder_decoder",
"test_modeling_marian",
"test_modeling_tf_common",
"test_modeling_tf_encoder_decoder",
]
test_files = []
for file_or_dir in os.listdir(PATH_TO_TESTS):
path = os.path.join(PATH_TO_TESTS, file_or_dir)
if os.path.isdir(path):
filenames = [os.path.join(file_or_dir, file) for file in os.listdir(path)]
else:
filenames = [file_or_dir]
for filename in filenames:
if (
os.path.isfile(os.path.join(PATH_TO_TESTS, filename))
and "test_modeling" in filename
and not os.path.splitext(filename)[0] in _ignore_files
):
test_files.append(filename)
return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
"""Parse the content of test_file to detect what's in all_model_classes"""
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
content = f.read()
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
# Check with one less parenthesis as well
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(","):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
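# Hedged sketch of what the regexes above extract: for a (made-up) test file containing
#     all_model_classes = (BertModel, BertForMaskedLM) if is_torch_available() else ()
# find_tested_models would return ["BertModel", "BertForMaskedLM"].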
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
def check_all_models_are_tested():
"""Check all models are properly tested."""
modules = get_model_modules()
test_files = get_model_test_files()
failures = []
for module in modules:
test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file]
if len(test_file) == 0:
failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
elif len(test_file) > 1:
failures.append(f"{module.__name__} has several test files: {test_file}.")
else:
test_file = test_file[0]
new_failures = check_models_are_tested(module, test_file)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
"""Return the list of all models in at least one auto class."""
result = set() # To avoid duplicates we concatenate all model classes in a set.
if is_torch_available():
for attr_name in dir(transformers.models.auto.modeling_auto):
if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name)))
if is_tf_available():
for attr_name in dir(transformers.models.auto.modeling_tf_auto):
if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name)))
if is_flax_available():
for attr_name in dir(transformers.models.auto.modeling_flax_auto):
if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name)))
return [cls for cls in result]
def ignore_unautoclassed(model_name):
"""Rules to determine if `name` should be in an auto class."""
# Special white list
if model_name in IGNORE_NON_AUTO_CONFIGURED:
return True
# Encoder and Decoder should be ignored
if "Encoder" in model_name or "Decoder" in model_name:
return True
return False
def check_models_are_auto_configured(module, all_auto_models):
"""Check models defined in module are each in an auto class."""
defined_models = get_models(module)
failures = []
for model_name, _ in defined_models:
if model_name not in all_auto_models and not ignore_unautoclassed(model_name):
failures.append(
f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
"`utils/check_repo.py`."
)
return failures
def check_all_models_are_auto_configured():
"""Check all models are each in an auto class."""
missing_backends = []
if not is_torch_available():
missing_backends.append("PyTorch")
if not is_tf_available():
missing_backends.append("TensorFlow")
if not is_flax_available():
missing_backends.append("Flax")
if len(missing_backends) > 0:
missing = ", ".join(missing_backends)
if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
raise Exception(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}."
)
else:
warnings.warn(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
"didn't make any change in one of those backends modeling files, you should probably execute the "
"command above to be on the safe side."
)
modules = get_model_modules()
all_auto_models = get_all_auto_configured_models()
failures = []
for module in modules:
new_failures = check_models_are_auto_configured(module, all_auto_models)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")
def check_decorator_order(filename):
"""Check that in the test file `filename` the slow decorator is always last."""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
decorator_before = None
errors = []
for i, line in enumerate(lines):
search = _re_decorator.search(line)
if search is not None:
decorator_name = search.groups()[0]
if decorator_before is not None and decorator_name.startswith("parameterized"):
errors.append(i)
decorator_before = decorator_name
elif decorator_before is not None:
decorator_before = None
return errors
def check_all_decorator_order():
"""Check that in all test files, the slow decorator is always last."""
errors = []
for fname in os.listdir(PATH_TO_TESTS):
if fname.endswith(".py"):
filename = os.path.join(PATH_TO_TESTS, fname)
new_errors = check_decorator_order(filename)
errors += [f"- {filename}, line {i}" for i in new_errors]
if len(errors) > 0:
msg = "\n".join(errors)
raise ValueError(
f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}"
)
def find_all_documented_objects():
"""Parse the content of all doc files to detect which classes and functions it documents"""
documented_obj = []
for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
for doc_file in Path(PATH_TO_DOC).glob("**/*.mdx"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall("\[\[autodoc\]\]\s+(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
return documented_obj
# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.
DEPRECATED_OBJECTS = [
"AutoModelWithLMHead",
"BartPretrainedModel",
"DataCollator",
"DataCollatorForSOP",
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"PretrainedBartModel",
"PretrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"TFAutoModelWithLMHead",
"TFBartPretrainedModel",
"TextDataset",
"TextDatasetForNextSentencePrediction",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Tokenizer",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
"TFTrainer",
"TFTrainingArguments",
]
# Exceptionally, some objects should not be documented after all rules passed.
# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!
UNDOCUMENTED_OBJECTS = [
"AddedToken", # This is a tokenizers class.
"BasicTokenizer", # Internal, should never have been in the main init.
"CharacterTokenizer", # Internal, should never have been in the main init.
"DPRPretrainedReader", # Like an Encoder.
"DummyObject", # Just picked by mistake sometimes.
"MecabTokenizer", # Internal, should never have been in the main init.
"ModelCard", # Internal type.
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer)
"TFDPRPretrainedReader", # Like an Encoder.
"TransfoXLCorpus", # Internal type.
"WordpieceTokenizer", # Internal, should never have been in the main init.
"absl", # External module
"add_end_docstrings", # Internal, should never have been in the main init.
"add_start_docstrings", # Internal, should never have been in the main init.
"cached_path", # Internal used for downloading models.
"convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights
"logger", # Internal logger
"logging", # External module
"requires_backends", # Internal function
]
# This list should be empty. Objects in it should get their own doc page.
SHOULD_HAVE_THEIR_OWN_PAGE = [
# Benchmarks
"PyTorchBenchmark",
"PyTorchBenchmarkArguments",
"TensorFlowBenchmark",
"TensorFlowBenchmarkArguments",
]
def ignore_undocumented(name):
"""Rules to determine if `name` should be undocumented."""
# NOT DOCUMENTED ON PURPOSE.
# Constants uppercase are not documented.
if name.isupper():
return True
# PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
if (
name.endswith("PreTrainedModel")
or name.endswith("Decoder")
or name.endswith("Encoder")
or name.endswith("Layer")
or name.endswith("Embeddings")
or name.endswith("Attention")
):
return True
# Submodules are not documented.
if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(
os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py")
):
return True
# All load functions are not documented.
if name.startswith("load_tf") or name.startswith("load_pytorch"):
return True
# is_xxx_available functions are not documented.
if name.startswith("is_") and name.endswith("_available"):
return True
# Deprecated objects are not documented.
if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
return True
# MMBT model does not really work.
if name.startswith("MMBT"):
return True
if name in SHOULD_HAVE_THEIR_OWN_PAGE:
return True
return False
def check_all_objects_are_documented():
"""Check all models are properly documented."""
documented_objs = find_all_documented_objects()
modules = transformers._modules
objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
if len(undocumented_objs) > 0:
raise Exception(
"The following objects are in the public init so should be documented:\n - "
+ "\n - ".join(undocumented_objs)
)
check_docstrings_are_in_md()
check_model_type_doc_match()
def check_model_type_doc_match():
"""Check all doc pages have a corresponding model type."""
model_doc_folder = Path(PATH_TO_DOC) / "model_doc"
model_docs = [m.stem for m in model_doc_folder.glob("*.mdx")]
model_types = list(transformers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys())
model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types]
errors = []
for m in model_docs:
if m not in model_types and m != "auto":
close_matches = get_close_matches(m, model_types)
error_message = f"{m} is not a proper model identifier."
if len(close_matches) > 0:
close_matches = "/".join(close_matches)
error_message += f" Did you mean {close_matches}?"
errors.append(error_message)
if len(errors) > 0:
raise ValueError(
"Some model doc pages do not match any existing model type:\n"
+ "\n".join(errors)
+ "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in "
"models/auto/configuration_auto.py."
)
# Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`.
_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`")
# Re pattern to catch things between double backquotes.
_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)")
# Re pattern to catch example introduction.
_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE)
def is_rst_docstring(docstring):
"""
Returns `True` if `docstring` is written in rst.
"""
if _re_rst_special_words.search(docstring) is not None:
return True
if _re_double_backquotes.search(docstring) is not None:
return True
if _re_rst_example.search(docstring) is not None:
return True
return False
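# Hedged examples of the heuristic above: is_rst_docstring("Returns :obj:`int`.") -> True
# (rst role), while a Markdown-style docstring such as "Returns `int`." matches none of
# the three patterns and gives False.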
def check_docstrings_are_in_md():
"""Check all docstrings are in md"""
files_with_rst = []
for file in Path(PATH_TO_TRANSFORMERS).glob("**/*.py"):
with open(file, "r") as f:
code = f.read()
docstrings = code.split('"""')
for idx, docstring in enumerate(docstrings):
if idx % 2 == 0 or not is_rst_docstring(docstring):
continue
files_with_rst.append(file)
break
if len(files_with_rst) > 0:
raise ValueError(
"The following files have docstrings written in rst:\n"
+ "\n".join([f"- {f}" for f in files_with_rst])
+ "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n"
"(`pip install git+https://github.com/huggingface/doc-builder`)"
)
def check_repo_quality():
"""Check all models are properly tested and documented."""
print("Checking all models are included.")
check_model_list()
print("Checking all models are public.")
check_models_are_in_init()
print("Checking all models are properly tested.")
check_all_decorator_order()
check_all_models_are_tested()
print("Checking all objects are properly documented.")
check_all_objects_are_documented()
print("Checking all models are in at least one auto class.")
check_all_models_are_auto_configured()
if __name__ == "__main__":
check_repo_quality()
| 30,085 | 39.656757 | 141 | py |
robust-transformers | robust-transformers-main/utils/update_metadata.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import importlib.util
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import Repository
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(TRANSFORMERS_PATH, "__init__.py"),
submodule_search_locations=[TRANSFORMERS_PATH],
)
transformers_module = spec.loader.load_module()
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
]
# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
def camel_case_split(identifier):
"Split a camelcased `identifier` into words."
matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
return [m.group(0) for m in matches]
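# Hedged example of the split: camel_case_split("BertForMaskedLM") yields
# ["Bert", "For", "Masked", "LM"], which is what the prefix-stripping loop below relies on.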
def get_frameworks_table():
"""
Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
modules.
"""
# Dictionary model names to config.
config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
model_prefix_to_model_type = {
config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
pt_models = collections.defaultdict(bool)
tf_models = collections.defaultdict(bool)
flax_models = collections.defaultdict(bool)
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(transformers_module):
lookup_dict = None
if _re_tf_models.match(attr_name) is not None:
lookup_dict = tf_models
attr_name = _re_tf_models.match(attr_name).groups()[0]
elif _re_flax_models.match(attr_name) is not None:
lookup_dict = flax_models
attr_name = _re_flax_models.match(attr_name).groups()[0]
elif _re_pt_models.match(attr_name) is not None:
lookup_dict = pt_models
attr_name = _re_pt_models.match(attr_name).groups()[0]
if lookup_dict is not None:
while len(attr_name) > 0:
if attr_name in model_prefix_to_model_type:
lookup_dict[model_prefix_to_model_type[attr_name]] = True
break
# Try again after removing the last word in the name
attr_name = "".join(camel_case_split(attr_name)[:-1])
all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
all_models = list(all_models)
all_models.sort()
data = {"model_type": all_models}
data["pytorch"] = [pt_models[t] for t in all_models]
data["tensorflow"] = [tf_models[t] for t in all_models]
data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to find the preprocessing class (processor, tokenizer or feature extractor) for each model type.
processors = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
processors[t] = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
processors[t] = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
processors[t] = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
processors[t] = "AutoTokenizer"
data["processor"] = [processors[t] for t in all_models]
return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
"""
Update the table of model class to (pipeline_tag, auto_class) without removing old keys if they don't exist
anymore.
"""
auto_modules = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
# The type of pipeline may not exist in this framework
if not hasattr(module, mapping):
continue
# First extract all model_names
model_names = []
for name in getattr(module, mapping).values():
if isinstance(name, str):
model_names.append(name)
else:
model_names.extend(list(name))
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
return table
def update_metadata(token, commit_sha):
"""
    Update the metadata for the Transformers repo.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
repo = Repository(
tmp_dir, clone_from="huggingface/transformers-metadata", repo_type="dataset", use_auth_token=token
)
frameworks_table = get_frameworks_table()
frameworks_dataset = Dataset.from_pandas(frameworks_table)
frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
tags_dataset = Dataset.from_json(os.path.join(tmp_dir, "pipeline_tags.json"))
table = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(tags_dataset))
}
table = update_pipeline_and_auto_class_table(table)
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
model_classes = sorted(list(table.keys()))
tags_table = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
}
)
tags_dataset = Dataset.from_pandas(tags_table)
tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
if repo.is_repo_clean():
print("Nothing to commit!")
else:
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
commit_message = "Update"
repo.push_to_hub(commit_message)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.token, args.commit_sha)
| 10,573 | 42.875519 | 117 | py |
robust-transformers | robust-transformers-main/utils/check_dummies.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_re_test_backend = re.compile(r"^\s+if\s+is\_[a-z]*\_available\(\)")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
"""Find one (or multiple) backend in a code line of the init."""
if _re_test_backend.search(line) is None:
return None
backends = [b[0] for b in _re_backend.findall(line)]
backends.sort()
return "_and_".join(backends)
def read_init():
"""Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects."""
with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Get to the point we do the actual imports for type checking
line_index = 0
while not lines[line_index].startswith("if TYPE_CHECKING"):
line_index += 1
backend_specific_objects = {}
# Go through the end of the file
while line_index < len(lines):
# If the line is an if is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index])
if backend is not None:
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
line = lines[line_index]
single_line_import_search = _re_single_line_import.search(line)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 12):
objects.append(line[12:-2])
line_index += 1
backend_specific_objects[backend] = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object(name, backend_name):
"""Create the code for the dummy object corresponding to `name`."""
if name.isupper():
return DUMMY_CONSTANT.format(name)
elif name.islower():
return DUMMY_FUNCTION.format(name, backend_name)
else:
return DUMMY_CLASS.format(name, backend_name)
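# Hedged examples of the three templates above (the names are made up): create_dummy_object("LOGITS", '["torch"]')
# fills DUMMY_CONSTANT, create_dummy_object("load_tf_weights", '["torch"]') fills DUMMY_FUNCTION, and
# create_dummy_object("AutoModel", '["torch"]') fills DUMMY_CLASS with a stub whose __init__
# calls requires_backends(self, ["torch"]).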
def create_dummy_files():
"""Create the content of the dummy files."""
backend_specific_objects = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
dummy_files = {}
for backend, objects in backend_specific_objects.items():
backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "# flake8: noqa\n"
dummy_file += "from ..file_utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
dummy_files[backend] = dummy_file
return dummy_files
def check_dummies(overwrite=False):
"""Check if the dummy files are up to date and maybe `overwrite` with the right content."""
dummy_files = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
short_names = {"torch": "pt"}
# Locate actual dummy modules and read their content.
path = os.path.join(PATH_TO_TRANSFORMERS, "utils")
dummy_file_paths = {
backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
for backend in dummy_files.keys()
}
actual_dummies = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(file_path):
with open(file_path, "r", encoding="utf-8", newline="\n") as f:
actual_dummies[backend] = f.read()
else:
actual_dummies[backend] = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
"__init__ has new objects."
)
with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
f.write(dummy_files[backend])
else:
raise ValueError(
"The main __init__ has objects that are not present in "
f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
"to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 6,086 | 35.449102 | 118 | py |
robust-transformers | robust-transformers-main/utils/print_env_pt.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
| 1,006 | 33.724138 | 74 | py |
robust-transformers | robust-transformers-main/utils/check_table.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import importlib.util
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
"""
Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
lines.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start prompt.
start_index = 0
while not lines[start_index].startswith(start_prompt):
start_index += 1
start_index += 1
end_index = start_index
while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(TRANSFORMERS_PATH, "__init__.py"),
submodule_search_locations=[TRANSFORMERS_PATH],
)
transformers_module = spec.loader.load_module()
# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
def camel_case_split(identifier):
"Split a camelcased `identifier` into words."
matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
return [m.group(0) for m in matches]
def _center_text(text, width):
text_length = 2 if text == "✅" or text == "❌" else len(text)
left_indent = (width - text_length) // 2
right_indent = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
"""Generates an up-to-date model table from the content of the auto modules."""
# Dictionary model names to config.
config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
model_name_to_config = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
slow_tokenizers = collections.defaultdict(bool)
fast_tokenizers = collections.defaultdict(bool)
pt_models = collections.defaultdict(bool)
tf_models = collections.defaultdict(bool)
flax_models = collections.defaultdict(bool)
# Let's lookup through all transformers object (once).
for attr_name in dir(transformers_module):
lookup_dict = None
if attr_name.endswith("Tokenizer"):
lookup_dict = slow_tokenizers
attr_name = attr_name[:-9]
elif attr_name.endswith("TokenizerFast"):
lookup_dict = fast_tokenizers
attr_name = attr_name[:-13]
elif _re_tf_models.match(attr_name) is not None:
lookup_dict = tf_models
attr_name = _re_tf_models.match(attr_name).groups()[0]
elif _re_flax_models.match(attr_name) is not None:
lookup_dict = flax_models
attr_name = _re_flax_models.match(attr_name).groups()[0]
elif _re_pt_models.match(attr_name) is not None:
lookup_dict = pt_models
attr_name = _re_pt_models.match(attr_name).groups()[0]
if lookup_dict is not None:
while len(attr_name) > 0:
if attr_name in model_name_to_prefix.values():
lookup_dict[attr_name] = True
break
# Try again after removing the last word in the name
attr_name = "".join(camel_case_split(attr_name)[:-1])
# Let's build that table!
model_names = list(model_name_to_config.keys())
model_names.sort(key=str.lower)
columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
widths = [len(c) + 2 for c in columns]
widths[0] = max([len(name) for name in model_names]) + 2
# Build the table per se
table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
check = {True: "✅", False: "❌"}
for name in model_names:
prefix = model_name_to_prefix[name]
line = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
return table
def check_model_table(overwrite=False):
"""Check the model table in the index.rst is consistent with the state of the lib and maybe `overwrite`."""
current_table, start_index, end_index, lines = _find_text_in_file(
filename=os.path.join(PATH_TO_DOCS, "index.mdx"),
start_prompt="<!--This table is updated automatically from the auto modules",
end_prompt="<!-- End table-->",
)
new_table = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(PATH_TO_DOCS, "index.mdx"), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
else:
raise ValueError(
"The model table in the `index.mdx` has not been updated. Run `make fix-copies` to fix this."
)
def has_onnx(model_type):
"""
Returns whether `model_type` is supported by ONNX (by checking if there is an ONNX config) or not.
"""
config_mapping = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING
if model_type not in config_mapping:
return False
config = config_mapping[model_type]
config_module = config.__module__
module = transformers_module
for part in config_module.split(".")[1:]:
module = getattr(module, part)
config_name = config.__name__
onnx_config_name = config_name.replace("Config", "OnnxConfig")
return hasattr(module, onnx_config_name)
def get_onnx_model_list():
"""
Return the list of models supporting ONNX.
"""
config_mapping = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING
    model_names = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING
onnx_model_types = [model_type for model_type in config_mapping.keys() if has_onnx(model_type)]
onnx_model_names = [model_names[model_type] for model_type in onnx_model_types]
onnx_model_names.sort(key=lambda x: x.upper())
return "\n".join([f"- {name}" for name in onnx_model_names]) + "\n"
def check_onnx_model_list(overwrite=False):
"""Check the model list in the serialization.mdx is consistent with the state of the lib and maybe `overwrite`."""
current_list, start_index, end_index, lines = _find_text_in_file(
filename=os.path.join(PATH_TO_DOCS, "serialization.mdx"),
start_prompt="<!--This table is automatically generated by `make fix-copies`, do not fill manually!-->",
end_prompt="In the next two sections, we'll show you how to:",
)
new_list = get_onnx_model_list()
if current_list != new_list:
if overwrite:
with open(os.path.join(PATH_TO_DOCS, "serialization.mdx"), "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError("The list of ONNX-supported models needs an update. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
check_onnx_model_list(args.fix_and_overwrite)
| 9,799 | 41.060086 | 118 | py |
robust-transformers | robust-transformers-main/utils/notification_service_deprecated.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Old script for Slack's notification service. Still here as the entire suite has not been moved to the newer implementation.
import os
import re
import sys
from slack_sdk import WebClient
def handle_test_results(test_results):
expressions = test_results.split(" ")
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions):
if "failed" in expression:
failed += int(expressions[i - 1])
if "passed" in expression:
success += int(expressions[i - 1])
return failed, success, time_spent
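# Hedged example with a made-up pytest summary: handle_test_results("2 failed, 98 passed in 3.50s")
# returns (2, 98, "3.50s"); with the short "= ... =" form the trailing "=" is skipped and the
# time spent is still taken from the second-to-last token.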
def format_for_slack(total_results, results, scheduled: bool, title: str):
print(total_results, results)
header = {
"type": "header",
"text": {
"type": "plain_text",
"text": title,
"emoji": True,
},
}
if total_results["failed"] > 0:
total = {
"type": "section",
"fields": [
{"type": "mrkdwn", "text": f"*Failures:*\n❌ {total_results['failed']} failures."},
{"type": "mrkdwn", "text": f"*Passed:*\n✅ {total_results['success']} tests passed."},
],
}
else:
total = {
"type": "section",
"fields": [
{"type": "mrkdwn", "text": "\n🌞 All tests passed."},
],
}
blocks = [header, total]
if total_results["failed"] > 0:
for key, result in results.items():
print(key, result)
blocks.append({"type": "header", "text": {"type": "plain_text", "text": key, "emoji": True}})
blocks.append(
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Results:*\n{result['failed']} failed, {result['success']} passed.",
},
{"type": "mrkdwn", "text": f"*Time spent:*\n{result['time_spent']}"},
],
}
)
elif not scheduled:
for key, result in results.items():
blocks.append(
{"type": "section", "fields": [{"type": "mrkdwn", "text": f"*{key}*\n{result['time_spent']}."}]}
)
footer = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"<https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}|View on GitHub>",
},
}
blocks.append(footer)
blocks = {"blocks": blocks}
return blocks
if __name__ == "__main__":
arguments = sys.argv[1:]
if "scheduled" in arguments:
arguments.remove("scheduled")
scheduled = True
else:
scheduled = False
if scheduled:
# The scheduled run has several artifacts for each job.
file_paths = {
"TF Single GPU": {
"common": "run_all_tests_tf_gpu_test_reports/[].txt",
"pipeline": "run_all_tests_tf_gpu_test_reports/[].txt",
},
"Torch Single GPU": {
"common": "run_all_tests_torch_gpu_test_reports/[].txt",
"pipeline": "run_all_tests_torch_gpu_test_reports/[].txt",
"examples": "run_all_tests_torch_gpu_test_reports/[].txt",
},
"TF Multi GPU": {
"common": "run_all_tests_tf_multi_gpu_test_reports/[].txt",
"pipeline": "run_all_tests_tf_multi_gpu_test_reports/[].txt",
},
"Torch Multi GPU": {
"common": "run_all_tests_torch_multi_gpu_test_reports/[].txt",
"pipeline": "run_all_tests_torch_multi_gpu_test_reports/[].txt",
},
"Torch Cuda Extensions Single GPU": {"common": "run_tests_torch_cuda_extensions_gpu_test_reports/[].txt"},
"Torch Cuda Extensions Multi GPU": {
"common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/[].txt"
},
}
else:
file_paths = {
"TF Single GPU": {"common": "run_all_tests_tf_gpu_test_reports/[].txt"},
"Torch Single GPU": {"common": "run_all_tests_torch_gpu_test_reports/[].txt"},
"TF Multi GPU": {"common": "run_all_tests_tf_multi_gpu_test_reports/[].txt"},
"Torch Multi GPU": {"common": "run_all_tests_torch_multi_gpu_test_reports/[].txt"},
"Torch Cuda Extensions Single GPU": {"common": "run_tests_torch_cuda_extensions_gpu_test_reports/[].txt"},
"Torch Cuda Extensions Multi GPU": {
"common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/[].txt"
},
}
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
if not scheduled:
channel_id = os.environ["CI_SLACK_CHANNEL_ID"]
elif scheduled and len(arguments):
channel_id = os.environ["CI_SLACK_CHANNEL_ID_PAST_FUTURE"]
else:
channel_id = os.environ["CI_SLACK_CHANNEL_ID_DAILY"]
if scheduled:
title = "🤗 Results of the scheduled tests."
else:
title = "🤗 Self-push results"
if len(arguments):
title = f"{arguments} " + title
try:
results = {}
for job, file_dict in file_paths.items():
# Single return value for failed/success across steps of a same job
results[job] = {"failed": 0, "success": 0, "time_spent": "", "failures": ""}
for key, file_path in file_dict.items():
try:
with open(file_path.replace("[]", "stats")) as f:
failed, success, time_spent = handle_test_results(f.read())
results[job]["failed"] += failed
results[job]["success"] += success
results[job]["time_spent"] += time_spent[1:-1] + ", "
with open(file_path.replace("[]", "summary_short")) as f:
for line in f:
if re.search("FAILED", line):
results[job]["failures"] += line
except FileNotFoundError:
print("Artifact was not found, job was probably canceled.")
# Remove the trailing ", "
results[job]["time_spent"] = results[job]["time_spent"][:-2]
test_results_keys = ["failed", "success"]
total = {"failed": 0, "success": 0}
for job, job_result in results.items():
for result_key in test_results_keys:
total[result_key] += job_result[result_key]
if total["failed"] != 0 or scheduled:
to_be_sent_to_slack = format_for_slack(total, results, scheduled, title)
result = client.chat_postMessage(
channel=channel_id,
blocks=to_be_sent_to_slack["blocks"],
)
for job, job_result in results.items():
if len(job_result["failures"]):
client.chat_postMessage(
channel=channel_id, text=f"{job}\n{job_result['failures']}", thread_ts=result["ts"]
)
except Exception as e:
# Voluntarily catch every exception and send it to Slack.
raise Exception(f"Setup error: no artifacts were found. Error: {e}") from e
| 8,174 | 36.5 | 127 | py |
robust-transformers | robust-transformers-main/utils/test_module/custom_modeling.py | import torch
from transformers import PreTrainedModel
from .custom_configuration import CustomConfig, NoSuperInitConfig
class CustomModel(PreTrainedModel):
config_class = CustomConfig
def __init__(self, config):
super().__init__(config)
self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, x):
return self.linear(x)
def _init_weights(self, module):
pass
class NoSuperInitModel(PreTrainedModel):
config_class = NoSuperInitConfig
def __init__(self, config):
super().__init__(config)
self.linear = torch.nn.Linear(config.attribute, config.attribute)
def forward(self, x):
return self.linear(x)
def _init_weights(self, module):
pass
| 772 | 21.735294 | 77 | py |
TileTrans | TileTrans-main/reconstructor.py | from typing import Union, List, Dict, Any
import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.utils.data as Data
import common
from metrics import Metrics, MetricsL1, MetricsGradient
from itertools import zip_longest
class ReconMethod():
pass
# This method reconstructs the weight matrix by the L1 norm of each row vector.
# In detail, it calculates the sum of the absolute values of each row of the evaluated matrix, then sorts the rows based on that sum.
# The metrics used to evaluate the matrix can be assigned manually.
class ReconMethodL1Sort(ReconMethod):
@staticmethod
def sort(metrics:Metrics = MetricsL1, layers_group: List[torch.nn.Module] = None, grads_group:List[torch.Tensor] = [], **kargs: Any) -> torch.Tensor:
if layers_group is None:
raise ValueError("Layer must be {}".format(torch.nn.Module))
to_cat = []
for layer, grad in zip_longest(layers_group, grads_group):
weight = metrics.eval(layer, grad=grad)
weight = weight.view((weight.shape[0], -1))
to_cat.append(weight)
if len(to_cat) > 1:
weight = torch.cat(to_cat, dim=1)
else:
weight = to_cat[0]
line_sum = []
for i in range(weight.shape[0]):
line_sum.append(torch.abs(weight[i]).sum())
line_sum = torch.Tensor(line_sum)
_, index = torch.sort(line_sum)
return index
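# Minimal usage sketch (assumptions: MetricsL1.eval accepts grad=None, as implied by the
# zip_longest call above): for a single nn.Linear(4, 3) layer,
#     index = ReconMethodL1Sort.sort(MetricsL1, [nn.Linear(4, 3)])
# gives a length-3 LongTensor of row indices ordered by ascending L1 row sum, which the
# Reconstructor below uses with index_select to permute rows.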
class Reconstructor():
def __init__(self, metrics: Metrics = MetricsL1, method: ReconMethod = ReconMethodL1Sort) -> None:
self.metrics = metrics
self.method = method
def __call__(self, net: torch.nn.Module = None, grads: List[torch.Tensor] = None, **kargs:Any) -> List:
if net is None:
raise ValueError("Net must be {}".format(torch.nn.Module))
def transform_rows(layer:nn.modules, index:torch.Tensor) -> None :
reconstructed_weight = torch.clone(layer.weight).index_select(0, index)
layer.weight = nn.Parameter(reconstructed_weight)
if not (layer.bias is None):
reconstructed_bias = torch.clone(layer.bias).index_select(0, index)
layer.bias = nn.Parameter(reconstructed_bias)
def transform_cols(layer:nn.modules, index:torch.Tensor) -> None :
reconstructed_weight = torch.clone(layer.weight).index_select(1, index)
layer.weight = nn.Parameter(reconstructed_weight)
def reconstruct_by_index(layer_front, layer_back, index):
with torch.no_grad():
reconstructed_weight = torch.clone(layer_front.weight).index_select(0, index)
reconstructed_bias = torch.clone(layer_front.bias).index_select(0, index)
layer_front.weight = nn.Parameter(reconstructed_weight)
layer_front.bias = nn.Parameter(reconstructed_bias)
if type(layer_front) is nn.Conv2d and type(layer_back) is nn.Linear:
changed_index = torch.arange(layer_back.weight.shape[1]).view((len(index), -1))
changed_index = changed_index.index_select(0, index).view(-1)
reconstructed_weight = torch.clone(layer_back.weight).index_select(1, changed_index)
layer_back.weight = nn.Parameter(reconstructed_weight)
else:
reconstructed_weight = torch.clone(layer_back.weight).index_select(1, index)
layer_back.weight = nn.Parameter(reconstructed_weight)
def reconstruct_grads_by_index(layer_front, layer_back, grads, i, index):
with torch.no_grad():
reconstructed_grads = torch.clone(grads[i].cpu()).index_select(0, index)
grads[i] = reconstructed_grads
if type(layer_front) is nn.Conv2d and type(layer_back) is nn.Linear:
changed_index = torch.arange(layer_back.weight.shape[1]).view((len(index), -1))
changed_index = changed_index.index_select(0, index).view(-1)
reconstructed_grads = torch.clone(grads[i+1].cpu()).index_select(1, changed_index)
grads[i+1] = reconstructed_grads
else:
reconstructed_grads = torch.clone(grads[i+1].cpu()).index_select(1, index)
grads[i+1] = reconstructed_grads
if type(net) in [models.AlexNet, models.VGG]:
recon_list = []
layers = common.get_layers(net)
for i in range(len(layers)-1):
layer_0 = layers[i]
layer_1 = layers[i+1]
if grads:
index = self.method.sort(self.metrics, [layer_0], [grads[i]])
reconstruct_by_index(layer_0, layer_1, index)
reconstruct_grads_by_index(layer_0, layer_1, grads, i, index)
else:
index = self.method.sort(self.metrics, [layer_0])
reconstruct_by_index(layer_0, layer_1, index)
recon_list.append(index)
return recon_list
elif type(net) is models.ResNet:
recon_dic = {}
for c in net.children():
if type(c) is nn.Sequential:
for b in c.children():
if type(b) is models.resnet.BasicBlock:
index = self.method.sort(self.metrics, [b.conv1])
transform_rows(b.conv1, index)
transform_rows(b.bn1, index)
transform_cols(b.conv2, index)
layers = [net.layer1, net.layer2, net.layer3, net.layer4]
layers_row_group = []
layers_col_group = []
bns_group = []
layers_row_group.append(net.conv1)
bns_group.append(net.bn1)
for i,l in enumerate(layers):
for j,b in enumerate(l.children()):
if i == 0 and j==0:
layers_col_group.append(b.conv1)
elif j!= 0:
layers_col_group.append(b.conv1)
if not (b.downsample is None):
layers_row_group.append(b.downsample._modules['0'])
bns_group.append(b.downsample._modules['1'])
layers_row_group.append(b.conv2)
bns_group.append(b.bn2)
if i == 3:
layers_col_group.append(net.fc)
else:
layers_col_group.append(layers[i+1]._modules['0'].conv1)
layers_col_group.append(layers[i+1]._modules['0'].downsample._modules['0'])
index = self.method.sort(self.metrics, layers_row_group)
for trans_layer in layers_row_group:
transform_rows(trans_layer, index)
for bn in bns_group:
transform_rows(bn, index)
for recover_layer in layers_col_group:
if type(recover_layer) is nn.Linear:
changed_index = torch.arange(net.layer4._modules['2'].conv2.weight.shape[1]).view((len(index), -1))
changed_index = changed_index.index_select(0, index).view(-1)
transform_cols(recover_layer, changed_index)
else:
transform_cols(recover_layer, index)
layers_row_group = []
layers_col_group = []
bns_group = []
return recon_dic | 7,797 | 47.7375 | 153 | py |
TileTrans | TileTrans-main/pruner.py | import os
from typing import List, Any
from itertools import zip_longest
import torch
from torch.nn import modules
from torch.utils.data.dataloader import DataLoader
import torchvision
import torch.nn as nn
import torch.utils.data as Data
import torchvision.transforms as transforms
import torchvision.models as models
import torch.distributed as dist
import common
def imply_mask(self, input):
with torch.no_grad():
self.weight = nn.Parameter(self.weight*self.mask)
def add_masks(net):
layers = common.get_layers(net)
with torch.no_grad():
masks = [torch.ones(layer.weight.shape) for layer in layers]
for i in range(len(masks)):
layers[i].register_buffer('mask', masks[i])
layers[i].register_forward_pre_hook(imply_mask)
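# Hedged usage sketch (assuming `net` is a model type supported by common.get_layers,
# e.g. a torchvision AlexNet/VGG): after add_masks(net), every returned layer carries a
# "mask" buffer of ones and re-applies weight = weight * mask on each forward call via
# the pre-hook above; the pruning methods below only have to overwrite that buffer.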
class Method():
def __init__(self, metrics=None) -> None:
        self._metrics = metrics # Class for calculating the importance score of the weight matrix.
def __call__(self, net:nn.Module, sparsity:float, is_soft:bool=False) -> None:
pass
class Pruner():
def __init__(self, method:Method=None) -> None:
self._method = method # Class to prune the weight matrix.
def prune(self, net:torch.nn.Module, sparsity:float, grads: List[torch.Tensor] = [], is_soft: bool=False) -> None:
if not self._method:
raise Exception("Methos is not set! ")
if sparsity > 1 or sparsity <0:
raise ValueError("Sparsity must be a float number between 0 and 1.")
self._method(net, sparsity, grads)
class EW_pruning(Method):
def __init__(self, metrics=None) -> None:
super().__init__(metrics=metrics)
def __call__(self, net:nn.Module, sparsity: float, grads:List[torch.Tensor]=[], is_soft:bool=False) -> None:
layers = common.get_layers(net)
with torch.no_grad():
to_cat = [self._metrics.eval(l, grad=g).view(-1) for l,g in zip_longest(layers, grads)]
concatenated_weights = torch.cat(to_cat)
target_index = int(sparsity*len(concatenated_weights))
# del to_cat
# torch.cuda.empty_cache()
sorted_result, _ = torch.sort(concatenated_weights)
threshold = sorted_result[target_index].item()
# del concatenated_weights
# del sorted_result
# torch.cuda.empty_cache()
to_cat = [self._metrics.eval(l, grad=g).view(-1) for l,g in zip_longest(layers, grads)]
masks = [c.view(l.weight.shape) >= threshold for l,c in zip_longest(layers, to_cat)]
# del to_cat
# torch.cuda.empty_cache()
for l,m in zip(layers, masks):
l.register_buffer("mask", m)
l.register_forward_pre_hook(imply_mask)
class TW_pruning(Method):
def __init__(self, tile_shape:list=[128,1], metrics=None) -> None:
super().__init__(metrics=metrics)
self.tile_shape = tile_shape
def __call__(self, net: nn.Module, sparsity: float, grads:List[torch.Tensor]=[], is_soft:bool=False) -> None:
def cal_tile_mean(weight, tile_shape):
tile_mean = torch.zeros((int((weight.shape[0] + tile_shape[0] - 1)/tile_shape[0]), int((weight.shape[1] + tile_shape[1] - 1)/tile_shape[1])))
for i in range(tile_mean.shape[0]):
for j in range(tile_mean.shape[1]):
tile = torch.abs(weight[i*tile_shape[0]:(i+1)*tile_shape[0], j*tile_shape[1]:(j+1)*tile_shape[1]])
tile_mean[i][j] = torch.sum(tile) / float(tile.nelement())
return tile_mean
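        # Hedged shape example for the helper above: an 8x4 weight with tile_shape [4, 2]
        # produces a 2x2 tile_mean, each entry being the mean absolute value of one 4x2 tile.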
layers = common.get_layers(net)
tile_shape = self.tile_shape
with torch.no_grad():
to_cat = []
shape_list = []
weights = [self._metrics.eval(l, grad=g) for l,g in zip_longest(layers, grads)]
for w,layer in zip(weights, layers):
if type(layer) is nn.Conv2d:
tile_mean = cal_tile_mean(w.view((layer.weight.shape[0],-1)), tile_shape)
elif type(layer) is nn.Linear:
tile_mean = cal_tile_mean(w, tile_shape)
else:
tile_mean = cal_tile_mean(w, tile_shape)
shape_list.append(tile_mean.shape)
to_cat.append(tile_mean.view(-1))
if len(to_cat)==1:
concatenated_weights = to_cat[0]
else:
concatenated_weights = torch.cat(to_cat)
target_index = int(sparsity*len(concatenated_weights))
sorted_result, _ = torch.sort(concatenated_weights)
threshold = sorted_result[target_index]
tile_masks = [w.view(s) >= threshold for w,s in zip(to_cat, shape_list)]
for m,l in zip(tile_masks, layers):
if type(l) is nn.Conv2d:
mask = torch.zeros_like(l.weight).view((l.weight.shape[0],-1))
elif type(l) is nn.Linear:
mask = torch.zeros_like(l.weight)
else:
mask = torch.zeros_like(l.weight)
for ii in range(m.shape[0]):
for jj in range(m.shape[1]):
if m[ii][jj]:
mask[ii*tile_shape[0]:(ii+1)*tile_shape[0], jj*tile_shape[1]:(jj+1)*tile_shape[1]] = 1
if type(l) is nn.Conv2d:
mask = mask.view(l.weight.shape)
l.register_buffer("mask", mask)
l.register_forward_pre_hook(imply_mask)
class TEW_pruning(Method):
def __init__(self, tile_shape:list=[128,1], metrics=None) -> None:
super().__init__(metrics=metrics)
self.tile_shape = tile_shape
def __call__(self, net: nn.Module, sparsity: float, recovery:float=0.01, is_soft:bool=False) -> None:
def cal_tile_mean(weight, tile_shape):
tile_mean = torch.zeros((int((weight.shape[0] + tile_shape[0] - 1)/tile_shape[0]), int((weight.shape[1] + tile_shape[1] - 1)/tile_shape[1])))
for i in range(tile_mean.shape[0]):
for j in range(tile_mean.shape[1]):
tile = torch.abs(weight[i*tile_shape[0]:(i+1)*tile_shape[0], j*tile_shape[1]:(j+1)*tile_shape[1]])
tile_mean[i][j] = torch.sum(tile) / float(tile.nelement())
return tile_mean
layers = common.get_layers(net)
tile_shape = self.tile_shape
with torch.no_grad():
to_cat = []
shape_list = []
            weights = [self._metrics.eval(l) for l in layers]
for w,layer in zip(weights, layers):
if type(layer) is nn.Conv2d:
tile_mean = cal_tile_mean(w.view((layer.weight.shape[0],-1)), tile_shape)
elif type(layer) is nn.Linear:
tile_mean = cal_tile_mean(w, tile_shape)
else:
tile_mean = cal_tile_mean(w, tile_shape)
shape_list.append(tile_mean.shape)
to_cat.append(tile_mean.view(-1))
if len(to_cat)==1:
concatenated_weights = to_cat[0]
else:
concatenated_weights = torch.cat(to_cat)
if sparsity+recovery>1:
raise ValueError("Sparsity({}) plus recovery({}) is larger than 1.".format(sparsity, recovery))
target_index = int((sparsity+recovery)*len(concatenated_weights))
sorted_result, _ = torch.sort(concatenated_weights)
threshold = sorted_result[target_index]
tile_masks = [w.view(s) >= threshold for w,s in zip(to_cat, shape_list)]
to_cat = []
recovery_masks = []
for m,l in zip(tile_masks, layers):
if type(l) is nn.Conv2d:
mask = torch.zeros_like(l.weight).view((l.weight.shape[0],-1))
elif type(l) is nn.Linear:
mask = torch.zeros_like(l.weight)
else:
mask = torch.zeros_like(l.weight)
for ii in range(m.shape[0]):
for jj in range(m.shape[1]):
if m[ii][jj]:
mask[ii*tile_shape[0]:(ii+1)*tile_shape[0], jj*tile_shape[1]:(jj+1)*tile_shape[1]] = 1
if type(l) is nn.Conv2d:
mask = mask.view(l.weight.shape)
recovery_mask = torch.logical_xor(mask, torch.ones_like(mask))
                to_cat.append((self._metrics.eval(l)*recovery_mask).flatten())
recovery_masks.append(recovery_mask)
l.register_buffer("mask", mask)
l.register_forward_pre_hook(imply_mask)
# Recovery step
if len(to_cat)==1:
concatenated_recovery_weights = to_cat[0]
else:
concatenated_recovery_weights = torch.cat(to_cat)
target_index = int((1-recovery)*len(concatenated_recovery_weights))
sorted_result,_ = torch.sort(concatenated_recovery_weights)
threshold = sorted_result[target_index]
for m,l in zip(recovery_masks, layers):
recovery_mask = (l.weight*m)>=threshold
l.mask += recovery_mask | 9,324 | 42.574766 | 153 | py |
TileTrans | TileTrans-main/common.py | import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
import sys
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def print_model_weight(net):
for layer in net.children():
if type(layer) is nn.Sequential:
print_model_weight(layer)
else:
if type(layer) is nn.Conv2d:
print(str(layer) + ' sparsity: ' + str(float(torch.sum(layer.weight == 0))/float(layer.weight.nelement())))
elif type(layer) is nn.Linear:
print(str(layer) + ' sparsity: ' + str(float(torch.sum(layer.weight == 0))/float(layer.weight.nelement())))
def get_layers(net:nn.Module):
def _search_layers(net:nn.Module, layers:list):
for l in net.children():
if type(l) in [nn.Sequential, models.resnet.BasicBlock]:
_search_layers(l, layers)
# elif type(l) in [nn.Conv2d, nn.Linear, nn_flags.Conv_flags, nn_flags.Linear_flags]:
elif type(l) in [nn.Conv2d, nn.Linear]:
layers.append(l)
layers = []
_search_layers(net, layers)
return layers
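# For example, get_layers(torchvision.models.alexnet()) should return the model's five
# Conv2d layers followed by its three Linear layers, in definition order; pooling,
# dropout, and activation modules are skipped.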
def imply_mask(self, input):
with torch.no_grad():
self.weight = nn.Parameter(self.weight*self.mask)
def add_masks(net):
layers = get_layers(net)
with torch.no_grad():
masks = [torch.ones(layer.weight.shape) for layer in layers]
for i in range(len(masks)):
layers[i].register_buffer('mask', masks[i])
layers[i].register_forward_pre_hook(imply_mask)
def padding(weight, tile_shape):
    # Flatten to 2D and zero-pad so that both dimensions become multiples of the tile shape.
    weight_2d = weight.view(weight.shape[0], -1)
    W = tile_shape[0]
    H = tile_shape[1]
    w_to_padding = (W - weight_2d.shape[0] % W) % W
    h_to_padding = (H - weight_2d.shape[1] % H) % H
    padded_weight = torch.cat((weight_2d, torch.zeros((weight_2d.shape[0], h_to_padding))), 1)
    padded_weight = torch.cat((padded_weight, torch.zeros((w_to_padding, padded_weight.shape[1]))), 0)
    return padded_weight
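# For example (under the padding sketch above), a 130x3 weight with tile_shape [128, 1]
# gains 126 zero rows and becomes 256x3, so it splits into whole 128x1 tiles.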
def shifting(weight, value, dim):
weight_left, weight_right = torch.split(weight, (weight.shape[dim] - value, value), dim)
return torch.cat((weight_right, weight_left), dim)
def check_sparsity(layer):
print("Sum = {}, nele = {}, Sparsity = {}".format(torch.sum(layer.mask),layer.mask.nelement(), 1 - torch.sum(layer.mask)/float(layer.mask.nelement())))
return (1 - torch.sum(layer.mask)/float(layer.mask.nelement())).item()
def check_model_saprsity(net):
result = []
layers = get_layers(net)
for l in layers:
print(l)
result.append(check_sparsity(l))
return result
def worker_init(wrk_id):
np.random.seed(torch.utils.data.get_worker_info().seed%(2**32 - 1))
def get_dataset(dataset:str="ImageNet"):
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
if dataset == "ImageNet":
# train_set = loader.ImageNetDataset(transform=transform)
# test_set = loader.ImageNetDataset(val=True, transform=transform)
train_set = datasets.ImageNet(root="/home/dataset/imagenet", split="train", transform=transform)
test_set = datasets.ImageNet(root="/home/dataset/imagenet", split="val", transform=transform)
elif dataset == "CIFAR10":
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform)
test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform)
return train_set, test_set | 3,712 | 37.278351 | 155 | py |
TileTrans | TileTrans-main/metrics.py | import os
from typing import List, Any
import torch
import torch.nn as nn
from common import get_layers
class Metrics():
pass
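# By convention of the subclasses below, a Metrics class is used as a namespace of
# static methods: eval(layer, ...) returns a per-weight importance score shaped like
# layer.weight, and hook_func(...) collects any statistics the metric needs during
# training (see MetricsGradient).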
class MetricsL1(Metrics):
@staticmethod
    def eval(layer: nn.Module, *args: Any, **kargs: Any):
return torch.abs(layer.weight)
@staticmethod
def hook_func(*args: Any, **kargs: Any):
pass
class MetricsL2(Metrics):
@staticmethod
    def eval(layer: nn.Module, *args: Any, **kargs: Any):
return torch.square(layer.weight)
@staticmethod
def hook_func(*args: Any, **kargs: Any):
pass
class MetricsGradient(Metrics):
@staticmethod
def eval(layer: nn.Module, grad: torch.Tensor=None, *args: Any, **kargs: Any):
return torch.square(layer.weight.cpu()*grad.cpu())
@staticmethod
def hook_func(net: nn.Module, *args: Any, **kargs: Any):
with torch.no_grad():
layers = get_layers(net)
grads = kargs["grads"]
total_steps = kargs["total_steps"]
worker_nums = kargs["worker_nums"]
step = kargs["step"]
for i in range(len(grads)):
grads[i] = grads[i].cuda()
if step == 0:
for i in range(len(grads)):
grads[i][:] = 0
for l,g in zip(layers, grads):
g += l.weight.grad/total_steps
@staticmethod
def create_grads(net: nn.Module) -> List[torch.Tensor]:
layers = get_layers(net)
grads = []
for l in layers:
grad = torch.zeros_like(l.weight)
grads.append(grad)
return grads
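# Usage sketch for MetricsGradient (illustrative; `net`, `loader` and `loss_fn` are
# assumed to come from the caller, and hook_func expects CUDA to be available):
#
#   grads = MetricsGradient.create_grads(net)
#   for step, (x, y) in enumerate(loader):
#       net.zero_grad()
#       loss_fn(net(x), y).backward()
#       MetricsGradient.hook_func(net, grads=grads, total_steps=len(loader),
#                                 worker_nums=1, step=step)
#
# The accumulated `grads` can then be passed to a pruning method, e.g.
# Pruner.prune(net, sparsity, grads=grads) with MetricsGradient as the metrics class.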
| 1,626 | 26.576271 | 82 | py |
TileTrans | TileTrans-main/train.py | import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data
import torch.distributed as dist
import numpy as np
import matplotlib.pyplot as plt
import torchvision.models as models
import torchvision.transforms as transforms
GPU_NUM=2
def check_correct_rate(net, test_loader):
correct_num = 0
net.cuda()
net.eval()
with torch.no_grad():
for step, (x, y) in enumerate(test_loader):
b_x = x.cuda()
b_y = y.cuda()
output = net(b_x)
prey_y = torch.max(output, 1)[1].cuda().data
correct_num += torch.sum(prey_y == b_y).type(torch.FloatTensor)
if step%50==0:
print("{}/{}...".format(step, len(test_loader)))
net.train()
return float(correct_num/len(test_loader.dataset))
def train(net, train_loader, test_loader, loss_func, optimizer, epochs, save_name):
def print_overwrite(step, goal, running_loss, status):
print("{}/{}, {} loss={}...".format(step, goal, status, running_loss ))
loss_min = np.inf
acc_max = 0
net.cuda()
for epoch in range(epochs):
loss_valid = 0
loss_train = 0
runtime_loss = 0
net.train()
for step, (x, y) in enumerate(train_loader):
b_x = x.cuda()
b_y = y.cuda()
output = net(b_x)
loss = loss_func(output, b_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_train += loss.item()
runtime_loss = loss_train/(step+1)
if step%50 == 0:
print_overwrite(step, len(train_loader), runtime_loss, 'train')
correct_num = 0
net.eval()
with torch.no_grad():
for step, (x, y) in enumerate(test_loader):
b_x = x.cuda()
b_y = y.cuda()
output = net(b_x)
loss = loss_func(output, b_y)
loss_valid += loss.item()
prey_y = torch.max(output, 1)[1].cuda().data
correct_num += torch.sum(prey_y == b_y).type(torch.FloatTensor)
# print_overwrite(step, len(test_loader), runtime_loss, 'valid')
loss_train /= len(train_loader)
loss_valid /= len(test_loader)
accuracy = correct_num/len(test_loader.dataset)
print('\n--------------------------------------------------')
print('Epoch: {} Train Loss: {:.4f} Valid Loss: {:.4f} Correct Rate: {:.4f}'.format(epoch, loss_train, loss_valid, accuracy))
print('--------------------------------------------------')
# if loss_valid < loss_min:
if acc_max < accuracy:
acc_max = accuracy
torch.save(net.state_dict(), save_name)
# print("\nMinimum Validation Loss of {:.4f} at epoch {}/{}".format(loss_min, epoch, EPOCH))
print("\nMaximum Validation Accuracy of {:.4f} at epoch {}/{}".format(accuracy, epoch, epochs))
print('Model Saved\n') | 3,058 | 30.214286 | 136 | py |
speechbrain | speechbrain-main/setup.py | #!/usr/bin/env python3
import os
import sys
import site
import setuptools
from distutils.core import setup
# Editable install in user site directory can be allowed with this hack:
# https://github.com/pypa/pip/issues/7953.
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
with open("README.md") as f:
long_description = f.read()
with open(os.path.join("speechbrain", "version.txt")) as f:
version = f.read().strip()
setup(
name="speechbrain",
version=version,
description="All-in-one speech toolkit in pure Python and Pytorch",
long_description=long_description,
long_description_content_type="text/markdown",
author="Mirco Ravanelli & Others",
author_email="speechbrain@gmail.com",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
],
packages=setuptools.find_packages(),
package_data={"speechbrain": ["version.txt", "log-config.yaml"]},
install_requires=[
"hyperpyyaml",
"joblib",
"numpy",
"packaging",
"scipy",
"sentencepiece",
"torch>=1.9",
"torchaudio",
"tqdm",
"huggingface_hub",
],
python_requires=">=3.7",
url="https://speechbrain.github.io/",
)
| 1,280 | 25.6875 | 72 | py |
speechbrain | speechbrain-main/tools/profiling/profile.py | """Example recipe to benchmark SpeechBrain using PyTorch profiling.
Inference with a pretrained model is benchmarked for real-time factors and memory peaks across audio durations and batch sizes.
Profiling is carried out either on random data (pure noise) or on an example file that is truncated and repeated to
be representative of a benchmark setting (duration vs batch size). The setup is defined in: profile.yaml.
@profile_optimiser is used: the last two/six batches are recorded for profiling.
Run from within this directory (yaml defines an example audio w/ relative path):
`python profile.py profile.yaml`
Author:
* Andreas Nautsch 2022
"""
import sys
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.profiling import (
profile_report,
export,
report_time,
report_memory,
)
from speechbrain.pretrained import (
Pretrained,
EncoderDecoderASR,
EncoderASR,
EndToEndSLU,
EncoderClassifier,
SpeakerRecognition,
VAD,
SepformerSeparation,
SpectralMaskEnhancement,
SNREstimator,
)
from typing import Optional, List
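# A minimal profile.yaml compatible with the __main__ block below would look roughly
# like this (illustrative sketch; the shipped yaml and model source may differ):
#
#   pretrained_model:
#       type: EncoderDecoderASR
#       source: speechbrain/asr-crdnn-rnnlm-librispeech
#   profiling_dimensions:
#       audio_mockup_secs: [1, 2, 4]
#       batch_sizes: [1, 2, 4]
#       triangle_only: True
#       example_audio: null
#       export_logs: False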
def get_funcs_to_unary_input_classifier(
cls,
call_func: str,
source: str,
save_dir: str,
device: torch.device,
example_audio=None,
batch_label="wavs",
lengths_label: Optional[str] = "wav_lens",
):
    """Builds prepare() and call() handles for pretrained models that take a single batch of waveforms."""
assert issubclass(cls, Pretrained)
pretrained = cls.from_hparams(
source=source, savedir=save_dir, run_opts={"device": device}
)
example = pretrained.load_audio(example_audio) if example_audio else None
def prepare(batch_size, duration, sampling_rate=16000):
"""Prepares input data."""
unary_input = {
batch_label: example[: duration * sampling_rate].repeat(
batch_size, 1
)
if example is not None
else torch.rand(
(batch_size, duration * sampling_rate), device=device
),
}
if lengths_label is not None:
unary_input[lengths_label] = torch.ones(batch_size)
return unary_input
    def call(model, **kwargs):
        """Calls the specified function."""
getattr(model, call_func)(**kwargs)
return prepare, call, pretrained
def get_funcs_to_profile(
pretrained_type, source, save_dir, example_audio=None, example=None
):
"""Creates per pretrained interface:
pretrained - loaded model, to device
prepare(batch_size, duration, sampling_rate=16000) - function handle to create dimensioned batch input
call(model, **kwargs) - function handle to the inference function to be profiled
"""
# Put all data directly to cpu/cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Create prepare() and call() functions depending on model type
if pretrained_type == "EncoderDecoderASR":
return get_funcs_to_unary_input_classifier(
cls=EncoderDecoderASR,
source=source,
save_dir=save_dir,
call_func="transcribe_batch",
example_audio=example_audio,
device=device,
)
elif pretrained_type == "EncoderASR":
return get_funcs_to_unary_input_classifier(
cls=EncoderASR,
source=source,
save_dir=save_dir,
call_func="transcribe_batch",
example_audio=example_audio,
device=device,
)
elif pretrained_type == "EndToEndSLU": # untested
return get_funcs_to_unary_input_classifier(
cls=EndToEndSLU,
source=source,
save_dir=save_dir,
call_func="decode_batch",
example_audio=example_audio,
device=device,
)
elif pretrained_type == "EncoderClassifier": # untested
return get_funcs_to_unary_input_classifier(
cls=EncoderClassifier,
source=source,
save_dir=save_dir,
call_func="classify_batch",
example_audio=example_audio,
device=device,
)
elif pretrained_type == "SpeakerRecognition": # untested
pretrained = SpeakerRecognition.from_hparams(
source=source, savedir=save_dir, run_opts={"device": device}
)
if example_audio:
example = pretrained.load_audio(example_audio)
def prepare(batch_size, duration, num_wavs2=10, sampling_rate=16000):
"""Prepares input data."""
return {
"wavs1": torch.rand(
(batch_size, duration * sampling_rate), device=device
),
"wavs2": torch.rand(
(num_wavs2, duration * sampling_rate), device=device
),
"wav1_lens": torch.ones(batch_size),
"wav2_lens": torch.ones(num_wavs2),
}
def call(model, **kwargs):
"""Calls verify_batch."""
model.verify_batch(**kwargs)
elif pretrained_type == "VAD": # untested
# VAD boundary post-processing can introduce slightly more load (ignored here)
return get_funcs_to_unary_input_classifier(
cls=VAD,
source=source,
save_dir=save_dir,
call_func="get_speech_prob_chunk",
example_audio=example_audio,
device=device,
)
elif pretrained_type == "SepformerSeparation": # untested
return get_funcs_to_unary_input_classifier(
cls=SepformerSeparation,
source=source,
save_dir=save_dir,
call_func="separate_batch",
example_audio=example_audio,
device=device,
batch_label="mix",
lengths_label=None,
)
elif pretrained_type == "SpectralMaskEnhancement": # untested
return get_funcs_to_unary_input_classifier(
cls=SpectralMaskEnhancement,
source=source,
save_dir=save_dir,
call_func="enhance_batch",
example_audio=example_audio,
device=device,
batch_label="noisy",
lengths_label="lengths",
)
elif pretrained_type == "SNREstimator": # untested
pretrained = SNREstimator.from_hparams(
source=source, savedir=save_dir, run_opts={"device": device}
)
if example_audio:
example = pretrained.load_audio(example_audio)
def prepare(batch_size, duration, num_spks=2, sampling_rate=16000):
"""Prepares input data."""
return {
"mix": example[: duration * sampling_rate].repeat(batch_size, 1)
if example is not None
else torch.rand(
(batch_size, duration * sampling_rate), device=device
),
"predictions": torch.rand(
(batch_size, duration * sampling_rate, num_spks),
device=device,
),
}
def call(model, **kwargs):
"""Calls estimate_batch"""
model.estimate_batch(**kwargs)
else: # pretrained_type must be part of SpeechBrain
raise TypeError("Unknown pretrained model.")
return prepare, call, pretrained
def benchmark_to_markdown(
benchmark: List[List[str]], columns: List[str], rows: List[str]
):
    """Renders a benchmark table as a Markdown string, prints it, and returns it."""
cell_width = max([len(x) for x in benchmark[0]])
fmt = "{: >%d} " % cell_width
out = (
"| "
+ fmt.format("|")
+ "| ".join([fmt.format(x) for x in columns])
+ "|\n"
)
sep = "|:" + cell_width * "-" + ":"
out += (1 + len(columns)) * sep + "|\n"
for i, r in enumerate(rows):
out += "| " + fmt.format("%ds " % r)
out += "| " + " | ".join(benchmark[i]) + " |\n"
print(out)
return out
def profile_pretrained(
pretrained_type,
source,
save_dir,
audio_mockup_secs,
batch_sizes,
triangle_only=True,
example_audio=None,
export_logs=False,
):
"""Loops through the profiler settings and benchmarks the inference of a pretrained model.
Reporting:
- real time factor
- peak memory (inference only)
Logs:
- shell w/ tabular profiler summary and targeted reporting
- if export_logs: traces are stored in `log` folder
- benchmark_real_time (file output)
- memory_peaks (file output)
"""
# Pretrained interface
create_batch_data, call, pretrained = get_funcs_to_profile(
pretrained_type, source, save_dir, example_audio
)
# Prepare table to write out profiling information
realtime_factor = []
memory_peaks = []
us_in_s = 1000.0 ** 2
byte_in_GB = 1024.0 ** 3
# Comprehensive benchmarking
for d, duration in enumerate(audio_mockup_secs):
realtime_factor_row = []
memory_peaks_row = []
for b, bs in enumerate(batch_sizes):
# skip expected heavy-loads
if (
triangle_only
): # this is a protection mechanism, since configs might explore exponentially
if (
(b + d >= (len(audio_mockup_secs) + len(batch_sizes)) / 2)
and (d > 0)
and (b > 0)
):
print(
f"\tskipped - duration: {duration:d}, batch_size: {bs:d}"
)
realtime_factor_row.append("_skip_")
memory_peaks_row.append("_skip_")
continue
# where are we :)
print(f"\nDuration: {duration:d}, batch_size: {bs:d}")
# benchmarking
kwargs = create_batch_data(batch_size=bs, duration=duration)
realtime = (
bs * us_in_s * duration
) # batches recorded x conversion factor x secs
# Simulating batching and profiling it
prof = export(profile_report()) if export_logs else profile_report()
num_steps = 10 # profile_report scheduler needs 10 steps for seven recordings
for _ in range(num_steps):
call(model=pretrained, **kwargs)
prof.step()
# Gathering time and memory reports
print(
prof.key_averages().table(
sort_by="cpu_time_total", row_limit=10
)
)
cpu_time, cuda_time = report_time(
prof, verbose=True, upper_control_limit=True
) # no need to avg #records
cpu_mem, cuda_mem = report_memory(prof, verbose=True)
if cuda_time == 0: # CPU values only
realtime_factor_row.append(f"{cpu_time / realtime:.2E}")
                memory_peaks_row.append(f"{cpu_mem / byte_in_GB:.2f} GB")
else: # CPU + GPU values
realtime_factor_row.append(
f"{cpu_time / realtime:.2E} + {cuda_time / realtime:.2E}"
)
                memory_peaks_row.append(
                    f"{cpu_mem / byte_in_GB:.2f} + {cuda_mem / byte_in_GB:.2f} GB"
                )
realtime_factor.append(realtime_factor_row)
memory_peaks.append(memory_peaks_row)
# Store tables
print("\n\tReal-time factor")
    with open("benchmark_realtime_factors.md", "w") as f:
f.write(
benchmark_to_markdown(
benchmark=realtime_factor,
columns=batch_sizes,
rows=audio_mockup_secs,
)
)
print("\n\tPeak memory")
    with open("benchmark_memory_peaks.md", "w") as f:
f.write(
benchmark_to_markdown(
benchmark=memory_peaks,
columns=batch_sizes,
rows=audio_mockup_secs,
)
)
if __name__ == "__main__":
# Reading command line arguments
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# Initialize ddp (useful only for multi-GPU DDP training)
sb.utils.distributed.ddp_init_group(run_opts)
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides, overrides_must_match=False)
# Ensure profiling dimensions are set
profiling_setup = {
"audio_mockup_secs": [1, 2],
"batch_sizes": [1, 2],
"triangle_only": True,
"example_audio": None,
"export_logs": False,
}
if "profiling_dimensions" in hparams:
if isinstance(hparams["profiling_dimensions"], dict):
for arg, specification in hparams["profiling_dimensions"].items():
if arg in profiling_setup:
profiling_setup[arg] = specification
# Lookup on pretrained model and its local storage
pretrained_type = hparams["pretrained_model"]["type"]
source = hparams["pretrained_model"]["source"]
save_dir = f"pretrained_models/{source}"
profile_pretrained(
pretrained_type=pretrained_type,
source=source,
save_dir=save_dir,
**profiling_setup,
)
| 13,246 | 32.45202 | 119 | py |
speechbrain | speechbrain-main/speechbrain/core.py | """Core SpeechBrain code for running experiments.
Authors
* Peter Plantinga 2020
* Abdel Heba 2020
* Mirco Ravanelli 2020
* Aku Rouhe 2021
* Andreas Nautsch 2022
"""
import os
import sys
import yaml
import time
import torch
import shutil
import logging
import inspect
import pathlib
import argparse
import tempfile
import warnings
from contextlib import contextmanager
import speechbrain as sb
from datetime import date
from enum import Enum, auto
from tqdm.contrib import tqdm
from types import SimpleNamespace
from torch.nn import SyncBatchNorm
from torch.utils.data import DataLoader
from torch.nn import DataParallel as DP
from torch.utils.data import IterableDataset
from torch.utils.data import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from hyperpyyaml import resolve_references
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.dataloader import LoopedLoader
from speechbrain.dataio.dataloader import SaveableDataLoader
from speechbrain.dataio.sampler import DistributedSamplerWrapper
from speechbrain.dataio.sampler import ReproducibleRandomSampler
logger = logging.getLogger(__name__)
DEFAULT_LOG_CONFIG = os.path.dirname(os.path.abspath(__file__))
DEFAULT_LOG_CONFIG = os.path.join(DEFAULT_LOG_CONFIG, "log-config.yaml")
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
INTRA_EPOCH_CKPT_FLAG = "brain_intra_epoch_ckpt"
PYTHON_VERSION_MAJOR = 3
PYTHON_VERSION_MINOR = 7
def create_experiment_directory(
experiment_directory,
hyperparams_to_save=None,
overrides={},
log_config=DEFAULT_LOG_CONFIG,
save_env_desc=True,
):
"""Create the output folder and relevant experimental files.
Arguments
---------
experiment_directory : str
The place where the experiment directory should be created.
hyperparams_to_save : str
A filename of a yaml file representing the parameters for this
experiment. If passed, references are resolved, and the result is
written to a file in the experiment directory called "hyperparams.yaml".
overrides : dict
A mapping of replacements made in the yaml file, to save in yaml.
log_config : str
A yaml filename containing configuration options for the logger.
save_env_desc : bool
If True, an environment state description is saved to the experiment
directory, in a file called env.log in the experiment directory.
"""
try:
        # all writing commands must be done by the main process
if sb.utils.distributed.if_main_process():
if not os.path.isdir(experiment_directory):
os.makedirs(experiment_directory)
# Write the parameters file
if hyperparams_to_save is not None:
hyperparams_filename = os.path.join(
experiment_directory, "hyperparams.yaml"
)
with open(hyperparams_to_save) as f:
resolved_yaml = resolve_references(f, overrides)
with open(hyperparams_filename, "w") as w:
print("# Generated %s from:" % date.today(), file=w)
print("# %s" % os.path.abspath(hyperparams_to_save), file=w)
print("# yamllint disable", file=w)
shutil.copyfileobj(resolved_yaml, w)
# Copy executing file to output directory
module = inspect.getmodule(inspect.currentframe().f_back)
if module is not None:
callingfile = os.path.realpath(module.__file__)
shutil.copy(callingfile, experiment_directory)
# Log exceptions to output automatically
log_file = os.path.join(experiment_directory, "log.txt")
logger_overrides = {
"handlers": {"file_handler": {"filename": log_file}}
}
sb.utils.logger.setup_logging(log_config, logger_overrides)
sys.excepthook = _logging_excepthook
# Log beginning of experiment!
logger.info("Beginning experiment!")
logger.info(f"Experiment folder: {experiment_directory}")
# Save system description:
if save_env_desc:
description_str = sb.utils.logger.get_environment_description()
with open(
os.path.join(experiment_directory, "env.log"), "w"
) as fo:
fo.write(description_str)
finally:
# wait for main_process if ddp is used
sb.utils.distributed.ddp_barrier()
def _logging_excepthook(exc_type, exc_value, exc_traceback):
"""Interrupt exception raising to log the error."""
logger.error("Exception:", exc_info=(exc_type, exc_value, exc_traceback))
def parse_arguments(arg_list=None):
"""Parse command-line arguments to the experiment.
Arguments
---------
arg_list : list, None
A list of arguments to parse. If not given, this is read from
`sys.argv[1:]`
Returns
-------
param_file : str
The location of the parameters file.
run_opts : dict
Run options, such as distributed, device, etc.
overrides : dict
The overrides to pass to ``load_hyperpyyaml``.
Example
-------
>>> argv = ['hyperparams.yaml', '--device', 'cuda:1', '--seed', '10']
>>> filename, run_opts, overrides = parse_arguments(argv)
>>> filename
'hyperparams.yaml'
>>> run_opts["device"]
'cuda:1'
>>> overrides
'seed: 10'
"""
if arg_list is None:
arg_list = sys.argv[1:]
parser = argparse.ArgumentParser(description="Run a SpeechBrain experiment")
parser.add_argument(
"param_file",
type=str,
help="A yaml-formatted file using the extended YAML syntax. "
"defined by SpeechBrain.",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="Run the experiment with only a few batches for all "
"datasets, to ensure code runs without crashing.",
)
parser.add_argument(
"--debug_batches",
type=int,
default=2,
help="Number of batches to run in debug mode.",
)
parser.add_argument(
"--debug_epochs",
type=int,
default=2,
help="Number of epochs to run in debug mode. "
"If a non-positive number is passed, all epochs are run.",
)
parser.add_argument(
"--debug_persistently",
default=False,
action="store_true",
help="Keep data stored during debug mode (not using /tmp).",
)
parser.add_argument(
"--log_config",
type=str,
help="A file storing the configuration options for logging",
)
    # if use_env = False in torch.distributed.launch then local_rank arg is given
parser.add_argument("--local_rank", type=int, help="Rank on local machine")
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="The device to run the experiment on (e.g. 'cuda:0')",
)
parser.add_argument(
"--data_parallel_backend",
default=False,
action="store_true",
help="This flag enables training with data_parallel.",
)
parser.add_argument(
"--distributed_launch",
default=False,
action="store_true",
help="This flag enables training with DDP. Assumes script run with "
"`torch.distributed.launch`",
)
parser.add_argument(
"--distributed_backend",
type=str,
default="nccl",
help="One of {nccl, gloo, mpi}",
)
parser.add_argument(
"--find_unused_parameters",
default=False,
action="store_true",
help="This flag disable unused parameters detection",
)
parser.add_argument(
"--jit_module_keys",
type=str,
nargs="*",
help="A list of keys in the 'modules' dict to jitify",
)
parser.add_argument(
"--auto_mix_prec",
default=None,
action="store_true",
help="This flag enables training with automatic mixed-precision.",
)
parser.add_argument(
"--bfloat16_mix_prec",
default=None,
action="store_true",
help="This flag enables training with bfloat16 mixed-precision.",
)
parser.add_argument(
"--max_grad_norm",
type=float,
help="Gradient norm will be clipped to this value, "
"enter negative value to disable.",
)
parser.add_argument(
"--nonfinite_patience",
type=int,
help="Max number of batches per epoch to skip if loss is nonfinite.",
)
parser.add_argument(
"--noprogressbar",
default=None,
action="store_true",
help="This flag disables the data loop progressbars.",
)
parser.add_argument(
"--ckpt_interval_minutes",
type=float,
help="Amount of time between saving intra-epoch checkpoints "
"in minutes. If non-positive, intra-epoch checkpoints are not saved.",
)
parser.add_argument(
"--grad_accumulation_factor",
type=int,
help="Number of batches to accumulate gradients before optimizer step",
)
parser.add_argument(
"--optimizer_step_limit",
type=int,
help="Number of optimizer steps to run. If not passed, all epochs are run.",
)
parser.add_argument(
"--tqdm_colored_bar",
default=False,
action="store_true",
help="Enable colored progress-bar in tqdm. If this is "
"false, tqdm shall use default colors.",
)
# Accept extra args to override yaml
run_opts, overrides = parser.parse_known_args(arg_list)
# Ignore items that are "None", they were not passed
run_opts = {k: v for k, v in vars(run_opts).items() if v is not None}
param_file = run_opts["param_file"]
del run_opts["param_file"]
overrides = _convert_to_yaml(overrides)
# Checking that DataParallel use the right number of GPU
if run_opts["data_parallel_backend"]:
if torch.cuda.device_count() == 0:
raise ValueError("You must have at least 1 GPU.")
    # For DDP, the device arg must equal the local_rank used by
# torch.distributed.launch. If run_opts["local_rank"] exists,
# use os.environ["LOCAL_RANK"]
local_rank = None
if "local_rank" in run_opts:
local_rank = run_opts["local_rank"]
else:
if "LOCAL_RANK" in os.environ and os.environ["LOCAL_RANK"] != "":
local_rank = int(os.environ["LOCAL_RANK"])
    # force device arg to be the same as local_rank from torch.distributed.launch
if local_rank is not None and "cuda" in run_opts["device"]:
run_opts["device"] = run_opts["device"][:-1] + str(local_rank)
return param_file, run_opts, overrides
def _convert_to_yaml(overrides):
"""Convert args to yaml for overrides"""
yaml_string = ""
# Handle '--arg=val' type args
joined_args = "=".join(overrides)
split_args = joined_args.split("=")
for arg in split_args:
if arg.startswith("--"):
yaml_string += "\n" + arg[len("--") :] + ":"
else:
yaml_string += " " + arg
return yaml_string.strip()
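# For example, the leftover CLI arguments ["--seed", "10", "--dropout=0.5"] are turned
# into the YAML string "seed: 10\ndropout: 0.5", which is later applied as overrides by
# load_hyperpyyaml in the recipe script.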
class Stage(Enum):
"""Simple enum to track stage of experiments."""
TRAIN = auto()
VALID = auto()
TEST = auto()
@sb.utils.checkpoints.register_checkpoint_hooks
class Brain:
"""Brain class abstracts away the details of data loops.
The primary purpose of the `Brain` class is the implementation of
the ``fit()`` method, which iterates epochs and datasets for the
purpose of "fitting" a set of modules to a set of data.
In order to use the ``fit()`` method, one should sub-class the ``Brain``
class and override any methods for which the default behavior does not
match the use case. For a simple use case (e.g., training a single model
with a single dataset) the only methods that need to be overridden are:
* ``compute_forward()``
* ``compute_objectives()``
The example below illustrates how overriding these two methods is done.
For more complicated use cases, such as multiple modules that need to
be updated, the following methods can be overridden:
* ``fit_batch()``
* ``evaluate_batch()``
Arguments
---------
modules : dict of str:torch.nn.Module pairs
These modules are passed to the optimizer by default if they have
trainable parameters, and will have ``train()``/``eval()`` called on them.
opt_class : torch.optim class
A torch optimizer constructor that takes only the list of
parameters (e.g. a lambda or partial function definition). By default,
this will be passed all modules in ``modules`` at the
beginning of the ``fit()`` method. This behavior can be changed
by overriding the ``configure_optimizers()`` method.
hparams : dict
Each key:value pair should consist of a string key and a hyperparameter
that is used within the overridden methods. These will
be accessible via an ``hparams`` attribute, using "dot" notation:
e.g., self.hparams.model(x).
run_opts : dict
A set of options to change the runtime environment, including
debug (bool)
If ``True``, this will only iterate a few batches for all
datasets, to ensure code runs without crashing.
debug_batches (int)
Number of batches to run in debug mode, Default ``2``.
debug_epochs (int)
Number of epochs to run in debug mode, Default ``2``.
If a non-positive number is passed, all epochs are run.
debug_persistently (bool)
Keep data stored during debug mode (not using /tmp), Default ``False``.
jit_module_keys (list of str)
List of keys in ``modules`` that should be jit compiled.
distributed_backend (str)
One of ``nccl``, ``gloo``, ``mpi``.
device (str)
The location for performing computations.
auto_mix_prec (bool)
If ``True``, automatic mixed-precision is used.
Activate it only with cuda.
max_grad_norm (float)
Default implementation of ``fit_batch()`` uses
``clip_grad_norm_`` with this value. Default: ``5``.
nonfinite_patience (int)
Number of times to ignore non-finite losses before stopping.
Default: ``3``.
noprogressbar (bool)
Whether to turn off progressbar when training. Default: ``False``.
ckpt_interval_minutes (float)
Amount of time between saving intra-epoch checkpoints,
in minutes, default: ``15.0``. If non-positive, these are not saved.
Typically in a script this comes from ``speechbrain.parse_args``, which
has different defaults than Brain. If an option is not defined here
(keep in mind that parse_args will inject some options by default),
then the option is also searched for in hparams (by key).
checkpointer : speechbrain.Checkpointer
By default, this will be used to load checkpoints, and will have the
optimizer added to continue training if interrupted.
profiler : torch.profiler.profile
Context manager for profiling and benchmarking of training/inference steps.
Default: ``None`` (skip profiling).
Example
-------
>>> from torch.optim import SGD
>>> class SimpleBrain(Brain):
... def compute_forward(self, batch, stage):
... return self.modules.model(batch[0])
... def compute_objectives(self, predictions, batch, stage):
... return torch.nn.functional.l1_loss(predictions, batch[0])
>>> model = torch.nn.Linear(in_features=10, out_features=10)
>>> brain = SimpleBrain({"model": model}, opt_class=lambda x: SGD(x, 0.1))
>>> brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))
"""
def __init__( # noqa: C901
self,
modules=None,
opt_class=None,
hparams=None,
run_opts=None,
checkpointer=None,
profiler=None,
):
self.opt_class = opt_class
self.checkpointer = checkpointer
self.profiler = profiler
# Arguments passed via the run opts dictionary
run_opt_defaults = {
"debug": False,
"debug_batches": 2,
"debug_epochs": 2,
"debug_persistently": False,
"device": "cpu",
"data_parallel_backend": False,
"distributed_launch": False,
"distributed_backend": "nccl",
"find_unused_parameters": False,
"jit_module_keys": None,
"auto_mix_prec": False,
"bfloat16_mix_prec": False,
"max_grad_norm": 5.0,
"nonfinite_patience": 3,
"noprogressbar": False,
"ckpt_interval_minutes": 0,
"grad_accumulation_factor": 1,
"optimizer_step_limit": None,
"tqdm_colored_bar": False,
"tqdm_barcolor": {
"train": "GREEN",
"valid": "MAGENTA",
"test": "CYAN",
},
}
for arg, default in run_opt_defaults.items():
if run_opts is not None and arg in run_opts:
if hparams is not None and arg in hparams:
logger.info(
"Info: "
+ arg
+ " arg overridden by command line input to: "
+ str(run_opts[arg])
)
setattr(self, arg, run_opts[arg])
else:
# If any arg from run_opt_defaults exist in hparams and
# not in command line args "run_opts"
if hparams is not None and arg in hparams:
logger.info(
"Info: " + arg + " arg from hparam file is used"
)
setattr(self, arg, hparams[arg])
else:
setattr(self, arg, default)
# Check Python version
if not (
sys.version_info.major == PYTHON_VERSION_MAJOR
and sys.version_info.minor >= PYTHON_VERSION_MINOR
):
logger.warn(
"Detected Python "
+ str(sys.version_info.major)
+ "."
+ str(sys.version_info.minor)
+ ". We suggest using SpeechBrain with Python >="
+ str(PYTHON_VERSION_MAJOR)
+ "."
+ str(PYTHON_VERSION_MINOR)
)
if self.data_parallel_backend and self.distributed_launch:
            sys.exit(
                "To use data_parallel backend, start your script with:\n\t"
                "python experiment.py hyperparams.yaml "
                "--data_parallel_backend=True\n"
                "To use DDP backend, start your script with:\n\t"
                "python -m torch.distributed.launch [args]\n"
"experiment.py hyperparams.yaml --distributed_launch=True "
"--distributed_backend=nccl"
)
# Switch to the right context
if self.device == "cuda":
torch.cuda.set_device(0)
elif "cuda" in self.device:
torch.cuda.set_device(int(self.device[-1]))
# Put modules on the right device, accessible with dot notation
self.modules = torch.nn.ModuleDict(modules).to(self.device)
# Make hyperparams available with dot notation too
if hparams is not None:
self.hparams = SimpleNamespace(**hparams)
# Checkpointer should point at a temporary directory in debug mode
if (
self.debug
and not self.debug_persistently
and self.checkpointer is not None
and hasattr(self.checkpointer, "checkpoints_dir")
):
tempdir = tempfile.TemporaryDirectory()
logger.info(
"Since debug mode is active, switching checkpointer "
f"output to temporary directory: {tempdir.name}"
)
self.checkpointer.checkpoints_dir = pathlib.Path(tempdir.name)
# Keep reference to tempdir as long as checkpointer exists
self.checkpointer.tempdir = tempdir
# Sampler should be handled by `make_dataloader`
# or if you provide a DataLoader directly, you can set
# this.train_sampler = your_sampler
# to have your_sampler.set_epoch() called on each epoch.
self.train_sampler = None
# Automatic mixed precision init
if self.auto_mix_prec:
self.scaler = torch.cuda.amp.GradScaler()
if self.checkpointer is not None:
self.checkpointer.add_recoverable("scaler", self.scaler)
# List parameter count for the user
total_params = sum(
p.numel() for p in self.modules.parameters() if p.requires_grad
)
if total_params > 0:
clsname = self.__class__.__name__
fmt_num = sb.utils.logger.format_order_of_magnitude(total_params)
logger.info(f"{fmt_num} trainable parameters in {clsname}")
if self.distributed_launch:
self.rank = int(os.environ["RANK"])
if not torch.distributed.is_initialized():
if self.rank > 0:
sys.exit(
" ================ WARNING ==============="
"Please add sb.ddp_init_group() into your exp.py"
"To use DDP backend, start your script with:\n\t"
"python -m torch.distributed.launch [args]\n\t"
"experiment.py hyperparams.yaml "
"--distributed_launch=True --distributed_backend=nccl"
)
else:
logger.warn(
"To use DDP, please add "
"sb.utils.distributed.ddp_init_group() into your exp.py"
)
logger.info(
"Only the main process is alive, "
"all other subprocess were killed."
)
# Prepare iterating variables
self.avg_train_loss = 0.0
self.step = 0
self.optimizer_step = 0
# Add this class to the checkpointer for intra-epoch checkpoints
if self.checkpointer is not None:
self.checkpointer.add_recoverable("brain", self)
        # Force default color for tqdm progressbar
if not self.tqdm_colored_bar:
self.tqdm_barcolor = dict.fromkeys(self.tqdm_barcolor, "")
def compute_forward(self, batch, stage):
"""Forward pass, to be overridden by sub-classes.
Arguments
---------
batch : torch.Tensor or tensors
An element from the dataloader, including inputs for processing.
stage : Stage
The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
Returns
-------
torch.Tensor or Tensors
The outputs after all processing is complete.
Directly passed to ``compute_objectives()``.
"""
raise NotImplementedError
def compute_objectives(self, predictions, batch, stage):
"""Compute loss, to be overridden by sub-classes.
Arguments
---------
predictions : torch.Tensor or Tensors
The output tensor or tensors to evaluate.
Comes directly from ``compute_forward()``.
batch : torch.Tensor or tensors
An element from the dataloader, including targets for comparison.
stage : Stage
The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
Returns
-------
loss : torch.Tensor
A tensor with the computed loss.
"""
raise NotImplementedError
def on_stage_start(self, stage, epoch=None):
"""Gets called when a stage starts.
Useful for defining class variables used during the stage.
Arguments
---------
stage : Stage
The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
epoch : int
The current epoch count.
"""
pass
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage.
Useful for computing stage statistics, saving checkpoints, etc.
Arguments
---------
stage : Stage
The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
stage_loss : float
The average loss over the completed stage.
epoch : int
The current epoch count.
"""
pass
def make_dataloader(
self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs
):
"""Creates DataLoaders for Datasets.
This is used by ``fit()`` and ``evaluate()`` if they just receive
Datasets.
Alternatively, this can be called from outside the Brain subclass.
In that case, the DataLoader should be passed to ``fit()`` in place
of the dataset.
The Stage.TRAIN DataLoader is handled specially. It has extra args for
shuffle and drop_last. In DDP a DistributedSampler is created (unless
the dataset is an IterableDataset).
NOTE
----
Some important DataLoader arguments are passed via **loader_kwargs,
e.g., batch_size, num_workers, pin_memory.
NOTE
----
By default, ``evaluate()`` specifies ckpt_prefix=None to stop the test
DataLoader being added to the checkpointer. If you need to add a
recoverable after saving checkpoints (e.g., at test time, after
checkpointing the training), and still be able to recover reasonably,
you should probably specify ``allow_partial_load=True``.
Arguments
---------
dataset : Dataset
A set of data to use to create data loader. If the Dataset is a
DynamicItemDataset, PaddedBatch is used as the default collate_fn,
unless specified in loader_kwargs.
stage : Stage
The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
ckpt_prefix : str, None
Prefix to use for SaveableDataLoader Checkpoint name. The Stage
name is added to this to create the full key. Set to None to not
save the DataLoader.
**loader_kwargs : dict
Additional keyword arguments to the DataLoader.
E.g., batch_size, num_workers, pin_memory.
"""
# TRAIN stage is handled specially.
if stage == sb.Stage.TRAIN:
loader_kwargs = self._train_loader_specifics(dataset, loader_kwargs)
dataloader = sb.dataio.dataloader.make_dataloader(
dataset, **loader_kwargs
)
if (
self.checkpointer is not None
and ckpt_prefix is not None
and (
isinstance(dataloader, SaveableDataLoader)
or isinstance(dataloader, LoopedLoader)
)
):
ckpt_key = ckpt_prefix + stage.name
self.checkpointer.add_recoverable(ckpt_key, dataloader)
return dataloader
def _train_loader_specifics(self, dataset, loader_kwargs):
sampler = loader_kwargs.get("sampler", None)
# Shuffling should really only matter for the train stage. Shuffling
# will also lead to more padding in batches if the order was otherwise
# sorted by length.
shuffle = loader_kwargs.get("shuffle", False)
if shuffle and not self.distributed_launch:
if sampler is not None:
raise ValueError(
"Cannot specify both shuffle=True"
"and a sampler in loader_kwargs"
)
sampler = ReproducibleRandomSampler(dataset)
self.train_sampler = sampler
loader_kwargs["sampler"] = self.train_sampler
# Delete the shuffle flag, since you cannot specify both a sampler and
# shuffling:
del loader_kwargs["shuffle"]
# Possibly make a DistributedSampler or a wrapper for some other sampler
if self.distributed_launch and not isinstance(dataset, IterableDataset):
# sort or not
if hasattr(self.hparams, "sorting"):
shuffle_ddp = (
self.hparams.sorting == "random"
) # False if 'ascending' or 'descending'
else:
shuffle_ddp = True
drop_last = loader_kwargs.get("drop_last", False)
# num_replicas arg is equal to world_size
# and retrieved automatically within
# DistributedSampler obj.
if sampler is not None:
self.train_sampler = DistributedSamplerWrapper(
sampler,
rank=self.rank,
drop_last=drop_last,
shuffle=shuffle,
)
# with DistributedSamplerWrapper, one must disable shuffling for dataloader
loader_kwargs["shuffle"] = False
loader_kwargs["sampler"] = self.train_sampler
elif loader_kwargs.get("batch_sampler") is None:
# no sampler and batch-sampler
self.train_sampler = DistributedSampler(
dataset,
rank=self.rank,
shuffle=shuffle_ddp,
drop_last=drop_last,
)
# with DistributedSamplerWrapper, one must disable shuffling for dataloader
loader_kwargs["shuffle"] = False
loader_kwargs["sampler"] = self.train_sampler
else: # batch_sampler was specified
self.train_sampler = DistributedSamplerWrapper(
loader_kwargs.get("batch_sampler", None),
rank=self.rank,
shuffle=shuffle_ddp,
)
loader_kwargs["batch_sampler"] = self.train_sampler
elif self.distributed_launch and isinstance(dataset, IterableDataset):
logger.warning(
"Cannot automatically solve distributed sampling "
"for IterableDataset."
)
return loader_kwargs
def on_fit_start(self):
"""Gets called at the beginning of ``fit()``, on multiple processes
if ``distributed_count > 0`` and backend is ddp.
Default implementation compiles the jit modules, initializes
optimizers, and loads the latest checkpoint to resume training.
"""
# Run this *after* starting all processes since jit modules cannot be
# pickled.
self._compile_jit()
# Wrap modules with parallel backend after jit
self._wrap_distributed()
# Initialize optimizers after parameters are configured
self.init_optimizers()
# Load latest checkpoint to resume training if interrupted
if self.checkpointer is not None:
self.checkpointer.recover_if_possible(
device=torch.device(self.device)
)
def init_optimizers(self):
"""Called during ``on_fit_start()``, initialize optimizers
after parameters are fully configured (e.g. DDP, jit).
The default implementation of this method depends on an optimizer
class being passed at initialization that takes only a list
of parameters (e.g., a lambda or a partial function definition).
This creates a single optimizer that optimizes all trainable params.
Override this class if there are multiple optimizers.
"""
if self.opt_class is not None:
self.optimizer = self.opt_class(self.modules.parameters())
if self.checkpointer is not None:
self.checkpointer.add_recoverable("optimizer", self.optimizer)
def zero_grad(self, set_to_none=False):
"""Sets the gradients of all optimized ``torch.Tensor``s to zero
        if ``set_to_none=False`` (default) or to None otherwise.
        Setting gradients to None should save memory, e.g. during
        ``evaluate()``, and thus a larger batch might be used.
"""
if hasattr(self, "optimizer"):
self.optimizer.zero_grad(set_to_none)
def on_evaluate_start(self, max_key=None, min_key=None):
"""Gets called at the beginning of ``evaluate()``
Default implementation loads the best-performing checkpoint for
evaluation, based on stored metrics.
Arguments
---------
max_key : str
Key to use for finding best checkpoint (higher is better).
By default, passed to ``self.checkpointer.recover_if_possible()``.
min_key : str
Key to use for finding best checkpoint (lower is better).
By default, passed to ``self.checkpointer.recover_if_possible()``.
"""
# Recover best checkpoint for evaluation
if self.checkpointer is not None:
self.checkpointer.recover_if_possible(
max_key=max_key,
min_key=min_key,
device=torch.device(self.device),
)
def fit_batch(self, batch):
"""Fit one batch, override to do multiple updates.
The default implementation depends on a few methods being defined
with a particular behavior:
* ``compute_forward()``
* ``compute_objectives()``
Also depends on having optimizers passed at initialization.
Arguments
---------
batch : list of torch.Tensors
Batch of data to use for training. Default implementation assumes
this batch has two elements: inputs and targets.
Returns
-------
detached loss
"""
should_step = self.step % self.grad_accumulation_factor == 0
# Managing automatic mixed precision
if self.auto_mix_prec:
with torch.autocast(device_type=torch.device(self.device).type):
outputs = self.compute_forward(batch, Stage.TRAIN)
# Losses are excluded from mixed precision to avoid instabilities
loss = self.compute_objectives(outputs, batch, Stage.TRAIN)
with self.no_sync(not should_step):
self.scaler.scale(
loss / self.grad_accumulation_factor
).backward()
if should_step:
self.scaler.unscale_(self.optimizer)
if self.check_gradients(loss):
self.scaler.step(self.optimizer)
self.scaler.update()
self.zero_grad()
self.optimizer_step += 1
else:
if self.bfloat16_mix_prec:
with torch.autocast(
device_type=torch.device(self.device).type,
dtype=torch.bfloat16,
):
outputs = self.compute_forward(batch, Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, Stage.TRAIN)
else:
outputs = self.compute_forward(batch, Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, Stage.TRAIN)
with self.no_sync(not should_step):
(loss / self.grad_accumulation_factor).backward()
if should_step:
if self.check_gradients(loss):
self.optimizer.step()
self.zero_grad()
self.optimizer_step += 1
self.on_fit_batch_end(batch, outputs, loss, should_step)
return loss.detach().cpu()
def on_fit_batch_end(self, batch, outputs, loss, should_step):
"""Called after ``fit_batch()``, meant for calculating and logging metrics.
Arguments
---------
batch : list of torch.Tensors
Batch of data to use for training. Default implementation assumes
this batch has two elements: inputs and targets.
outputs : list or dictionary of torch.Tensors
Returned value of compute_forward().
loss : torch.Tensor
Returned value of compute_objectives().
should_step : boolean
Whether optimizer.step() was called or not.
"""
pass
def check_gradients(self, loss):
"""Check if gradients are finite and not too large.
Automatically clips large gradients.
Arguments
---------
loss : tensor
The loss tensor after ``backward()`` has been called but
before the optimizers ``step()``.
Returns
-------
bool
Whether or not the optimizer step should be carried out.
"""
if not torch.isfinite(loss):
self.nonfinite_count += 1
# Print helpful debug info
logger.warn(f"Loss is {loss}.")
for p in self.modules.parameters():
if not torch.isfinite(p).all():
logger.warn("Parameter is not finite: " + str(p))
# Check if patience is exhausted
if self.nonfinite_count > self.nonfinite_patience:
raise ValueError(
"Loss is not finite and patience is exhausted. "
"To debug, wrap `fit()` with "
"autograd's `detect_anomaly()`, e.g.\n\nwith "
"torch.autograd.detect_anomaly():\n\tbrain.fit(...)"
)
else:
logger.warn("Patience not yet exhausted, ignoring this batch.")
return False
# Clip gradient norm
if self.max_grad_norm > 0.0:
torch.nn.utils.clip_grad_norm_(
(p for p in self.modules.parameters()), self.max_grad_norm
)
return True
def evaluate_batch(self, batch, stage):
"""Evaluate one batch, override for different procedure than train.
The default implementation depends on two methods being defined
with a particular behavior:
* ``compute_forward()``
* ``compute_objectives()``
Arguments
---------
batch : list of torch.Tensors
Batch of data to use for evaluation. Default implementation assumes
this batch has two elements: inputs and targets.
stage : Stage
The stage of the experiment: Stage.VALID, Stage.TEST
Returns
-------
detached loss
"""
out = self.compute_forward(batch, stage=stage)
loss = self.compute_objectives(out, batch, stage=stage)
return loss.detach().cpu()
def _fit_train(self, train_set, epoch, enable):
# Training stage
self.on_stage_start(Stage.TRAIN, epoch)
self.modules.train()
self.zero_grad()
# Reset nonfinite count to 0 each epoch
self.nonfinite_count = 0
if self.train_sampler is not None and hasattr(
self.train_sampler, "set_epoch"
):
self.train_sampler.set_epoch(epoch)
# Time since last intra-epoch checkpoint
last_ckpt_time = time.time()
with tqdm(
train_set,
initial=self.step,
dynamic_ncols=True,
disable=not enable,
colour=self.tqdm_barcolor["train"],
) as t:
for batch in t:
if self._optimizer_step_limit_exceeded:
logger.info("Train iteration limit exceeded")
break
self.step += 1
loss = self.fit_batch(batch)
self.avg_train_loss = self.update_average(
loss, self.avg_train_loss
)
t.set_postfix(train_loss=self.avg_train_loss)
# Profile only if desired (steps allow the profiler to know when all is warmed up)
if self.profiler is not None:
if self.profiler.record_steps:
self.profiler.step()
# Debug mode only runs a few batches
if self.debug and self.step == self.debug_batches:
break
if (
self.checkpointer is not None
and self.ckpt_interval_minutes > 0
and time.time() - last_ckpt_time
>= self.ckpt_interval_minutes * 60.0
):
# This should not use run_on_main, because that
# includes a DDP barrier. That eventually leads to a
# crash when the processes'
# time.time() - last_ckpt_time differ and some
# processes enter this block while others don't,
# missing the barrier.
if sb.utils.distributed.if_main_process():
self._save_intra_epoch_ckpt()
last_ckpt_time = time.time()
# Run train "on_stage_end" on all processes
self.zero_grad(set_to_none=True) # flush gradients
self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)
self.avg_train_loss = 0.0
self.step = 0
def _fit_valid(self, valid_set, epoch, enable):
# Validation stage
if valid_set is not None:
self.on_stage_start(Stage.VALID, epoch)
self.modules.eval()
avg_valid_loss = 0.0
with torch.no_grad():
for batch in tqdm(
valid_set,
dynamic_ncols=True,
disable=not enable,
colour=self.tqdm_barcolor["valid"],
):
self.step += 1
loss = self.evaluate_batch(batch, stage=Stage.VALID)
avg_valid_loss = self.update_average(loss, avg_valid_loss)
# Profile only if desired (steps allow the profiler to know when all is warmed up)
if self.profiler is not None:
if self.profiler.record_steps:
self.profiler.step()
# Debug mode only runs a few batches
if self.debug and self.step == self.debug_batches:
break
# Only run validation "on_stage_end" on main process
self.step = 0
run_on_main(
self.on_stage_end,
args=[Stage.VALID, avg_valid_loss, epoch],
)
def fit(
self,
epoch_counter,
train_set,
valid_set=None,
progressbar=None,
train_loader_kwargs={},
valid_loader_kwargs={},
):
"""Iterate epochs and datasets to improve objective.
Relies on the existence of multiple functions that can (or should) be
overridden. The following methods are used and expected to have a
certain behavior:
* ``fit_batch()``
* ``evaluate_batch()``
* ``update_average()``
If the initialization was done with distributed_count > 0 and the
distributed_backend is ddp, this will generally handle multiprocess
logic, like splitting the training data into subsets for each device and
only saving a checkpoint on the main process.
Arguments
---------
epoch_counter : iterable
Each call should return an integer indicating the epoch count.
train_set : Dataset, DataLoader
A set of data to use for training. If a Dataset is given, a
DataLoader is automatically created. If a DataLoader is given, it is
used directly.
valid_set : Dataset, DataLoader
A set of data to use for validation. If a Dataset is given, a
DataLoader is automatically created. If a DataLoader is given, it is
used directly.
train_loader_kwargs : dict
Kwargs passed to `make_dataloader()` for making the train_loader
(if train_set is a Dataset, not DataLoader).
E.G. batch_size, num_workers.
DataLoader kwargs are all valid.
valid_loader_kwargs : dict
Kwargs passed to `make_dataloader()` for making the valid_loader
(if valid_set is a Dataset, not DataLoader).
E.g., batch_size, num_workers.
DataLoader kwargs are all valid.
progressbar : bool
Whether to display the progress of each epoch in a progressbar.
"""
if not (
isinstance(train_set, DataLoader)
or isinstance(train_set, LoopedLoader)
):
train_set = self.make_dataloader(
train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs
)
if valid_set is not None and not (
isinstance(valid_set, DataLoader)
or isinstance(valid_set, LoopedLoader)
):
valid_set = self.make_dataloader(
valid_set,
stage=sb.Stage.VALID,
ckpt_prefix=None,
**valid_loader_kwargs,
)
self.on_fit_start()
if progressbar is None:
progressbar = not self.noprogressbar
# Only show progressbar if requested and main_process
enable = progressbar and sb.utils.distributed.if_main_process()
# Iterate epochs
for epoch in epoch_counter:
self._fit_train(train_set=train_set, epoch=epoch, enable=enable)
self._fit_valid(valid_set=valid_set, epoch=epoch, enable=enable)
# Debug mode only runs a few epochs
if (
self.debug
and epoch == self.debug_epochs
or self._optimizer_step_limit_exceeded
):
break
@property
def _optimizer_step_limit_exceeded(self):
return (
self.optimizer_step_limit is not None
and self.optimizer_step >= self.optimizer_step_limit
)
def _save_intra_epoch_ckpt(self):
"""Saves a CKPT with specific intra-epoch flag."""
self.checkpointer.save_and_keep_only(
end_of_epoch=False,
num_to_keep=1,
ckpt_predicate=lambda c: INTRA_EPOCH_CKPT_FLAG in c.meta,
meta={INTRA_EPOCH_CKPT_FLAG: True},
verbosity=logging.DEBUG,
)
def _compile_jit(self):
"""Compile requested modules with ``torch.jit.script``."""
if self.jit_module_keys is None:
return
for name in self.jit_module_keys:
if name not in self.modules:
raise ValueError(
"module" + name + " is not defined in your hparams file."
)
module = torch.jit.script(self.modules[name])
self.modules[name] = module.to(self.device)
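# Illustrative sketch (hypothetical hparams entry): ``jit_module_keys`` is
# typically supplied through the YAML hyperparameters, e.g.
#
#     # hparams.yaml
#     jit_module_keys: [encoder, decoder]
#
# in which case this method replaces self.modules["encoder"] and
# self.modules["decoder"] with their torch.jit.script-compiled versions.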
def _wrap_distributed(self):
"""Wrap modules with distributed wrapper when requested."""
if not self.distributed_launch and not self.data_parallel_backend:
return
elif self.distributed_launch:
for name, module in self.modules.items():
if any(p.requires_grad for p in module.parameters()):
module = SyncBatchNorm.convert_sync_batchnorm(module)
if self.distributed_backend == "gloo":
module = DDP(
module,
device_ids=None,
find_unused_parameters=self.find_unused_parameters,
)
else:
module = DDP(
module,
device_ids=[self.device],
find_unused_parameters=self.find_unused_parameters,
)
self.modules[name] = module
else:
# data_parallel_backend
for name, module in self.modules.items():
if any(p.requires_grad for p in module.parameters()):
module = DP(module)
self.modules[name] = module
def evaluate(
self,
test_set,
max_key=None,
min_key=None,
progressbar=None,
test_loader_kwargs={},
):
"""Iterate test_set and evaluate brain performance. By default, loads
the best-performing checkpoint (as recorded using the checkpointer).
Arguments
---------
test_set : Dataset, DataLoader
If a DataLoader is given, it is iterated directly. Otherwise passed
to ``self.make_dataloader()``.
max_key : str
Key to use for finding best checkpoint, passed to
``on_evaluate_start()``.
min_key : str
Key to use for finding best checkpoint, passed to
``on_evaluate_start()``.
progressbar : bool
Whether to display the progress in a progressbar.
test_loader_kwargs : dict
Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a
DataLoader. NOTE: ``loader_kwargs["ckpt_prefix"]`` gets
automatically overwritten to ``None`` (so that the test DataLoader
is not added to the checkpointer).
Returns
-------
average test loss
"""
if progressbar is None:
progressbar = not self.noprogressbar
if not (
isinstance(test_set, DataLoader)
or isinstance(test_set, LoopedLoader)
):
test_loader_kwargs["ckpt_prefix"] = None
test_set = self.make_dataloader(
test_set, Stage.TEST, **test_loader_kwargs
)
self.on_evaluate_start(max_key=max_key, min_key=min_key)
self.on_stage_start(Stage.TEST, epoch=None)
self.modules.eval()
avg_test_loss = 0.0
with torch.no_grad():
for batch in tqdm(
test_set,
dynamic_ncols=True,
disable=not progressbar,
colour=self.tqdm_barcolor["test"],
):
self.step += 1
loss = self.evaluate_batch(batch, stage=Stage.TEST)
avg_test_loss = self.update_average(loss, avg_test_loss)
# Profile only if desired (steps allow the profiler to know when all is warmed up)
if self.profiler is not None:
if self.profiler.record_steps:
self.profiler.step()
# Debug mode only runs a few batches
if self.debug and self.step == self.debug_batches:
break
# Only run evaluation "on_stage_end" on main process
run_on_main(
self.on_stage_end, args=[Stage.TEST, avg_test_loss, None]
)
self.step = 0
return avg_test_loss
def update_average(self, loss, avg_loss):
"""Update running average of the loss.
Arguments
---------
loss : torch.tensor
detached loss, a single float value.
avg_loss : float
current running average.
Returns
-------
avg_loss : float
The average loss.
"""
if torch.isfinite(loss):
avg_loss -= avg_loss / self.step
avg_loss += float(loss) / self.step
return avg_loss
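# Worked example (illustrative numbers): if the running average of the first
# two losses is 2.0 and the third detached loss is 5.0, then with
# self.step == 3 the update computes
#     2.0 - 2.0 / 3 + 5.0 / 3 == 3.0
# which equals ((3 - 1) * 2.0 + 5.0) / 3, i.e. the mean over all three steps,
# so the running mean is maintained without storing the individual losses.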
@contextmanager
def no_sync(self, use=True):
"""Copies pytorch's implementation for doing no_sync across all modules.
Explanation: nn.module.no_sync() is a context manager for when one does
not want to sync gradients, which happens when using both DDP and gradient accumulation.
A SpeechBrain Brain class can contain multiple modules, and calling no_sync on each of
them individually would be very awkward, so this context manager handles all of them at once.
Arguments
---------
use : bool
If set to `False`, gradients are still synchronized; useful for toggling the behaviour.
"""
if use:
old_values_list = []
for module in self.modules.values():
if not hasattr(module, "require_backward_grad_sync"):
# if not using DDP
break
old_values_list.append(module.require_backward_grad_sync)
module.require_backward_grad_sync = False
yield
for module, old_value in zip(
self.modules.values(), old_values_list
):
if not hasattr(module, "require_backward_grad_sync"):
break
module.require_backward_grad_sync = old_value
else:
yield
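# Illustrative usage sketch (hypothetical ``accumulation_steps`` variable;
# assumes a single self.optimizer created by the default opt_class): when
# accumulating gradients over several batches under DDP, synchronization can
# be skipped on all but the last micro-batch, e.g.
#
#     should_sync = (self.step % accumulation_steps) == 0
#     with self.no_sync(use=not should_sync):
#         loss.backward()
#     if should_sync:
#         self.optimizer.step()
#         self.zero_grad()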
@sb.utils.checkpoints.mark_as_saver
def _save(self, path):
save_dict = {
"step": self.step,
"avg_train_loss": self.avg_train_loss,
"optimizer_step": self.optimizer_step,
}
with open(path, "w") as w:
w.write(yaml.dump(save_dict))
@sb.utils.checkpoints.mark_as_loader
def _recover(self, path, end_of_epoch, device):
del end_of_epoch
del device
with open(path) as f:
save_dict = yaml.safe_load(f)
self.step = save_dict["step"]
self.avg_train_loss = save_dict["avg_train_loss"]
# Ensure compatibility with checkpoints from before optimizer_step:
if "optimizer_step" not in save_dict:
clsname = self.__class__.__name__
MSG = f"'optimizer_step' not found in {clsname} checkpoint."
MSG += " Using the saved 'step' value (BACKWARDS COMPATIBILITY)"
warnings.warn(MSG)
self.optimizer_step = self.step
else:
self.optimizer_step = save_dict["optimizer_step"]
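# For reference, the intra-epoch checkpoint file written by _save() above is a
# small YAML mapping along these lines (values are illustrative):
#
#     avg_train_loss: 3.27
#     optimizer_step: 1200
#     step: 1200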
| 54,493 | 36.608006 | 102 | py |
speechbrain | speechbrain-main/speechbrain/tokenizers/SentencePiece.py | """Library for Byte-pair-encoding (BPE) tokenization.
Authors
* Abdelwahab Heba 2020
* Loren Lugosch 2020
"""
import os.path
import torch
import logging
import csv
import json
import sentencepiece as spm
from speechbrain.dataio.dataio import merge_char
from speechbrain.utils import edit_distance
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)
class SentencePiece:
"""BPE class call the SentencePiece unsupervised text tokenizer from Google.
Reference: https://github.com/google/sentencepiece
SentencePiece lib is an unsupervised text tokenizer and detokenizer.
It implements subword units like Byte-pair-encoding (BPE),
Unigram language model and char/word tokenizer.
Arguments
---------
model_dir : str
The directory where the model will be saved (or already stored).
vocab_size : int, None, optional
Vocab size for the chosen tokenizer type (BPE, Unigram).
The vocab_size is optional for char, and mandatory for BPE & unigram
tokenization.
annotation_train : str
Path of the annotation file which is used to learn the tokenizer. It
can be in JSON or csv format.
annotation_read : str
The data entry which contains the word sequence in the annotation file.
model_type : str
(bpe, char, unigram).
If "bpe", train unsupervised tokenization of piece of words. see:
https://www.aclweb.org/anthology/P16-1162/
If "word" take the vocabulary from the input text.
If "unigram" do piece of word tokenization using unigram language
model, see: https://arxiv.org/abs/1804.10959
char_format_input : bool
Whether the read entry contains characters format input.
(default: False)
(e.g., a p p l e _ i s _ g o o d)
character_coverage : int
Amount of characters covered by the model, good defaults
are: 0.9995 for languages with a rich character set like Japanese or
Chinese and 1.0 for other languages with small character set.
(default: 1.0)
user_defined_symbols : string
String contained a list of symbols separated by a comma.
User-defined symbols are handled as one piece in any context.
(default: None)
max_sentencepiece_length : int
Maximum number of characters for the tokens. (default: 10)
bos_id : int
If -1 the bos_id = unk_id = 0. Otherwise, bos_id = int. (default: -1)
eos_id : int
If -1 the eos_id = unk_id = 0. Otherwise, eos_id = int. (default: -1)
split_by_whitespace : bool
If False, allow the sentencepiece to extract piece crossing multiple
words. This feature is important for : Chinese/Japanese/Korean.
(default: True)
num_sequences : int
If not none, use at most this many sequences to train the tokenizer
(for large datasets). (default: None)
annotation_list_to_check : list,
List of the annotation file which is used for checking the accuracy of
recovering words from the tokenizer.
annotation_format : str
The format of the annotation file. JSON or csv are the formats supported.
text_file: str
An alternate path to the text file (needed when multiple models are trained on
the same data file)
add_dummy_prefix : bool
If True the tokenizer adds dummy whitespace at the beginning of text. (default: True)
Example
-------
>>> import torch
>>> dict_int2lab = {1: "HELLO", 2: "MORNING"}
>>> model_dir = getfixture('tmpdir') / "tokenizer_data"
>>> # Example with csv
>>> annotation_train = "tests/samples/annotation/dev-clean.csv"
>>> annotation_read = "wrd"
>>> model_type = "bpe"
>>> bpe = SentencePiece(str(model_dir), 100, annotation_train, annotation_read, model_type)
>>> batch_seq = torch.Tensor([[1, 2, 2, 1],[1, 2, 1, 0]])
>>> batch_lens = torch.Tensor([1.0, 0.75])
>>> encoded_seq_ids, encoded_seq_pieces = bpe(
... batch_seq, batch_lens, dict_int2lab, task="encode"
... )
>>> # Example using JSON
>>> annotation_train = str(model_dir + "/dev-clean.json")
>>> annotation_read = "wrd"
>>> bpe = SentencePiece(model_dir, 100, annotation_train, annotation_read, model_type, annotation_format = 'json')
>>> encoded_seq_ids, encoded_seq_pieces = bpe(
... batch_seq, batch_lens, dict_int2lab, task="encode"
... )
"""
def __init__(
self,
model_dir,
vocab_size,
annotation_train=None,
annotation_read=None,
model_type="unigram",
char_format_input=False,
character_coverage=1.0,
user_defined_symbols=None,
max_sentencepiece_length=10,
bos_id=-1,
eos_id=-1,
pad_id=-1,
unk_id=0,
split_by_whitespace=True,
num_sequences=None,
annotation_list_to_check=None,
annotation_format="csv",
text_file=None,
add_dummy_prefix=True,
):
if model_type not in ["unigram", "bpe", "char"]:
raise ValueError("model_type must be one of : [unigram, bpe, char]")
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
if not isinstance(vocab_size, int):
raise ValueError("vocab_size must be integer.")
self.annotation_train = annotation_train
self.annotation_read = annotation_read
self.annotation_format = annotation_format
if self.annotation_train is not None:
ext = os.path.splitext(self.annotation_train)[1]
if text_file is None:
text_file = os.path.join(
model_dir,
os.path.basename(self.annotation_train).replace(
ext, ".txt"
),
)
self.text_file = text_file
self.prefix_model_file = os.path.join(
model_dir, str(vocab_size) + "_" + model_type
)
self.vocab_size = str(vocab_size)
self.model_type = model_type
self.char_format_input = char_format_input
self.character_coverage = str(character_coverage)
self.max_sentencepiece_length = str(max_sentencepiece_length)
self.bos_id = str(bos_id)
self.eos_id = str(eos_id)
self.pad_id = str(pad_id)
self.unk_id = str(unk_id)
self.num_sequences = num_sequences
self.split_by_whitespace = split_by_whitespace
self.user_defined_symbols = user_defined_symbols
self.add_dummy_prefix = str(add_dummy_prefix)
if not os.path.isfile(self.prefix_model_file + ".model"):
logger.info("Train tokenizer with type:" + self.model_type)
if not os.path.isfile(self.text_file):
if annotation_format == "csv":
run_on_main(self._csv2text)
elif annotation_format == "json":
run_on_main(self._json2text)
else:
raise ValueError(
"Annotation format not supported. Supported formats are csv and json. Got "
+ annotation_format
)
run_on_main(self._train_BPE)
else:
logger.info("Tokenizer is already trained.")
logger.info("==== Loading Tokenizer ===")
logger.info("Tokenizer path: " + self.prefix_model_file + ".model")
logger.info("Tokenizer vocab_size: " + str(self.vocab_size))
logger.info("Tokenizer type: " + self.model_type)
self.sp = spm.SentencePieceProcessor()
self.sp.load(self.prefix_model_file + ".model")
if annotation_list_to_check is not None:
run_on_main(
self._check_coverage_from_bpe,
kwargs={"list_annotation_files": annotation_list_to_check},
)
def _csv2text(self):
"""Read CSV file and convert specific data entries into text file.
"""
if not os.path.isfile(os.path.abspath(self.annotation_train)):
raise ValueError(
self.annotation_train
+ " is not a file. please provide annotation file for training."
)
logger.info(
"Extract "
+ self.annotation_read
+ " sequences from:"
+ self.annotation_train
)
annotation_file = open(self.annotation_train, "r")
reader = csv.reader(annotation_file)
headers = next(reader, None)
if self.annotation_read not in headers:
raise ValueError(
self.annotation_read + " must exist in:" + self.annotation_train
)
index_label = headers.index(self.annotation_read)
text_file = open(self.text_file, "w+")
row_idx = 0
for row in reader:
if self.num_sequences is not None and row_idx > self.num_sequences:
print(
"Using %d sequences to train the tokenizer."
% self.num_sequences
)
break
row_idx += 1
sent = row[index_label]
if self.char_format_input:
(sent,) = merge_char([sent.split()])
sent = " ".join(sent)
text_file.write(sent + "\n")
text_file.close()
annotation_file.close()
logger.info("Text file created at: " + self.text_file)
def _json2text(self):
"""Read JSON file and convert specific data entries into text file.
"""
if not os.path.isfile(os.path.abspath(self.annotation_train)):
raise ValueError(
self.annotation_train
+ " is not a file. please provide annotation file for training."
)
logger.info(
"Extract "
+ self.annotation_read
+ " sequences from:"
+ self.annotation_train
)
# Read JSON
with open(self.annotation_train, "r") as f:
out_json = json.load(f)
# Save text file
text_file = open(self.text_file, "w+")
row_idx = 0
for snt_id in out_json.keys():
if self.num_sequences is not None and row_idx > self.num_sequences:
print(
"Using %d sequences to train the tokenizer."
% self.num_sequences
)
break
row_idx += 1
sent = out_json[snt_id][self.annotation_read]
if self.char_format_input:
(sent,) = merge_char([sent.split()])
sent = " ".join(sent)
text_file.write(sent + "\n")
text_file.close()
logger.info("Text file created at: " + self.text_file)
def _train_BPE(self):
"""Train tokenizer with unsupervised techniques (BPE, Unigram) using
SentencePiece Library. If you use "char" mode, the SentencePiece
creates a char dict so the vocab_size attribute is not needed.
"""
query = (
"--input="
+ self.text_file
+ " --model_prefix="
+ self.prefix_model_file
+ " --model_type="
+ self.model_type
+ " --bos_id="
+ self.bos_id
+ " --eos_id="
+ self.eos_id
+ " --pad_id="
+ self.pad_id
+ " --unk_id="
+ self.unk_id
+ " --max_sentencepiece_length="
+ self.max_sentencepiece_length
+ " --character_coverage="
+ self.character_coverage
+ " --add_dummy_prefix="
+ self.add_dummy_prefix
)
if self.model_type not in ["char"]:
# include vocab_size
query += " --vocab_size=" + str(self.vocab_size)
if self.user_defined_symbols is not None:
query += " --user_defined_symbols=" + self.user_defined_symbols
if not self.split_by_whitespace:
query += " --split_by_whitespace=false"
# Train tokenizer
spm.SentencePieceTrainer.train(query)
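# Illustrative example (hypothetical paths and values): for a 1000-token
# unigram model the assembled training query would look roughly like
#
#     --input=results/tokenizer/dev-clean.txt --model_prefix=results/tokenizer/1000_unigram
#     --model_type=unigram --bos_id=-1 --eos_id=-1 --pad_id=-1 --unk_id=0
#     --max_sentencepiece_length=10 --character_coverage=1.0
#     --add_dummy_prefix=True --vocab_size=1000
#
# (a single space-separated string in practice, wrapped here for readability).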
def _check_coverage_from_bpe(self, list_annotation_files=[]):
"""Logging the accuracy of the BPE model to recover words from the training text.
Arguments
---------
list_annotation_files : list
List of annotation files used for checking the accuracy of recovering words from the tokenizer.
"""
for annotation_file in list_annotation_files:
if os.path.isfile(os.path.abspath(annotation_file)):
logger.info(
"==== Accuracy checking for recovering text from tokenizer ==="
)
# csv reading
if self.annotation_format == "csv":
fannotation_file = open(annotation_file, "r")
reader = csv.reader(fannotation_file)
headers = next(reader, None)
if self.annotation_read not in headers:
raise ValueError(
self.annotation_read
+ " must exist in:"
+ annotation_file
)
index_label = headers.index(self.annotation_read)
# json reading
else:
with open(self.annotation_train, "r") as f:
reader = json.load(f)
index_label = self.annotation_read
wrong_recover_list = []
for row in reader:
if self.annotation_format == "csv":
row = row[index_label]
else:
row = reader[row][index_label]
if self.char_format_input:
(row,) = merge_char([row.split()])
row = " ".join(row)
row = row.split("\n")[0]
encoded_id = self.sp.encode_as_ids(row)
decode_text = self.sp.decode_ids(encoded_id)
(details,) = edit_distance.wer_details_for_batch(
["utt1"],
[row.split(" ")],
[decode_text.split(" ")],
compute_alignments=True,
)
if details["WER"] > 0:
for align in details["alignment"]:
if align[0] != "=" and align[1] is not None:
if align[1] not in wrong_recover_list:
wrong_recover_list.append(align[1])
if self.annotation_format == "csv":
fannotation_file.close()
logger.info("recover words from: " + annotation_file)
if len(wrong_recover_list) > 0:
logger.warning(
"Wrong recover words: " + str(len(wrong_recover_list))
)
logger.warning(
"Tokenizer vocab size: " + str(self.sp.vocab_size())
)
logger.warning(
"accuracy recovering words: "
+ str(
1
- float(len(wrong_recover_list))
/ self.sp.vocab_size()
)
)
else:
logger.info("Wrong recover words: 0")
logger.warning("accuracy recovering words: " + str(1.0))
else:
logger.info(
"No accuracy recover checking for" + annotation_file
)
def __call__(
self, batch, batch_lens=None, ind2lab=None, task="encode",
):
"""This __call__ function implements the tokenizer encoder and decoder
(restoring the string of word) for BPE, Regularized BPE (with unigram),
and char (speechbrain/nnet/RNN.py).
Arguments
----------
batch : tensor.IntTensor or list
List if ( batch_lens = None and task = "decode_from_list")
Contains the original labels. Shape: [batch_size, max_length]
batch_lens : tensor.LongTensor
Containing the relative length of each label sequences. Must be 1D
tensor of shape: [batch_size]. (default: None)
ind2lab : dict
Dictionary which maps the index from label sequences
(batch tensor) to string label.
task : str
("encode", "decode", "decode_from_list)
"encode": convert the batch tensor into sequence of tokens.
the output contain a list of (tokens_seq, tokens_lens)
"decode": convert a tensor of tokens to a list of word sequences.
"decode_from_list": convert a list of token sequences to a list
of word sequences.
"""
if task == "encode" and ind2lab is None:
raise ValueError("Tokenizer encoder must have the ind2lab function")
if task == "encode":
# Convert list of words/chars to bpe ids
bpe = []
max_bpe_len = 0
batch_lens = (batch_lens * batch.shape[1]).round().int()
for i, utt_seq in enumerate(batch):
tokens = [
ind2lab[int(index)] for index in utt_seq[: batch_lens[i]]
]
if self.char_format_input:
(words_list,) = merge_char([tokens])
sent = " ".join(words_list)
else:
sent = " ".join(tokens)
bpe_encode = self.sp.encode_as_ids(sent)
bpe.append(bpe_encode)
# save the longest bpe sequence
# it helps to compute the relative length of each utterance
if len(bpe_encode) > max_bpe_len:
max_bpe_len = len(bpe_encode)
# Create bpe tensor
bpe_tensor = torch.zeros(
(batch.shape[0], max_bpe_len), device=batch.device
)
bpe_lens = torch.zeros((batch.shape[0]), device=batch.device)
for i, bpe_utt in enumerate(bpe):
bpe_tensor[i, : len(bpe_utt)] = torch.Tensor(bpe_utt)
bpe_lens[i] = len(bpe_utt) / max_bpe_len
return bpe_tensor, bpe_lens
elif task == "decode_from_list":
# From list of hyps (not padded outputs)
# do decoding
return [self.sp.decode_ids(utt_seq).split(" ") for utt_seq in batch]
elif task == "decode":
# From a batch tensor and a length tensor
# find the absolute batch lengths and do decoding
batch_lens = (batch_lens * batch.shape[1]).round().int()
return [
self.sp.decode_ids(
utt_seq[: batch_lens[i]].int().tolist()
).split(" ")
for i, utt_seq in enumerate(batch)
]
| 19,212 | 40.229614 | 119 | py |
speechbrain | speechbrain-main/speechbrain/decoders/seq2seq.py | """Decoding methods for seq2seq autoregressive model.
Authors
* Adel Moumen 2022
* Ju-Chieh Chou 2020
* Peter Plantinga 2020
* Mirco Ravanelli 2020
* Sung-Lin Yeh 2020
"""
import torch
import speechbrain as sb
from speechbrain.decoders.ctc import CTCPrefixScorer
class S2SBaseSearcher(torch.nn.Module):
"""S2SBaseSearcher class to be inherited by other
decoding approaches for seq2seq model.
Arguments
---------
bos_index : int
The index of the beginning-of-sequence (bos) token.
eos_index : int
The index of end-of-sequence token.
min_decode_ratio : float
The ratio of minimum decoding steps to the length of encoder states.
max_decode_ratio : float
The ratio of maximum decoding steps to the length of encoder states.
Returns
-------
predictions
Outputs as Python list of lists, with "ragged" dimensions; padding
has been removed.
scores
The sum of log probabilities (and possibly
additional heuristic scores) for each prediction.
"""
def __init__(
self, bos_index, eos_index, min_decode_ratio, max_decode_ratio,
):
super(S2SBaseSearcher, self).__init__()
self.bos_index = bos_index
self.eos_index = eos_index
self.min_decode_ratio = min_decode_ratio
self.max_decode_ratio = max_decode_ratio
def forward(self, enc_states, wav_len):
"""This method should implement the forward algorithm of decoding method.
Arguments
---------
enc_states : torch.Tensor
The precomputed encoder states to be used when decoding.
(ex. the encoded speech representation to be attended).
wav_len : torch.Tensor
The speechbrain-style relative length.
"""
raise NotImplementedError
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""This method should implement one step of
forwarding operation in the autoregressive model.
Arguments
---------
inp_tokens : torch.Tensor
The input tensor of the current timestep.
memory : No limit
The memory variables input for this timestep.
(ex. RNN hidden states).
enc_states : torch.Tensor
The encoder states to be attended.
enc_lens : torch.Tensor
The actual length of each enc_states sequence.
Returns
-------
log_probs : torch.Tensor
Log-probabilities of the current timestep output.
memory : No limit
The memory variables generated in this timestep.
(ex. RNN hidden states).
attn : torch.Tensor
The attention weight for doing penalty.
"""
raise NotImplementedError
def reset_mem(self, batch_size, device):
"""This method should implement the resetting of
memory variables for the seq2seq model.
E.g., initializing zero vector as initial hidden states.
Arguments
---------
batch_size : int
The size of the batch.
device : torch.device
The device to put the initial variables.
Return
------
memory : No limit
The initial memory variable.
"""
raise NotImplementedError
def lm_forward_step(self, inp_tokens, memory):
"""This method should implement one step of
forwarding operation for language model.
Arguments
---------
inp_tokens : torch.Tensor
The input tensor of the current timestep.
memory : No limit
The memory variables input for this timestep.
(e.g., RNN hidden states).
Return
------
log_probs : torch.Tensor
Log-probabilities of the current timestep output.
memory : No limit
The memory variables generated in this timestep.
(e.g., RNN hidden states).
"""
raise NotImplementedError
def reset_lm_mem(self, batch_size, device):
"""This method should implement the resetting of
memory variables in the language model.
E.g., initializing zero vector as initial hidden states.
Arguments
---------
batch_size : int
The size of the batch.
device : torch.device
The device to put the initial variables.
Return
------
memory : No limit
The initial memory variable.
"""
raise NotImplementedError
def change_max_decoding_length(self, min_decode_steps, max_decode_steps):
"""set the minimum/maximum length the decoder can take."""
return min_decode_steps, max_decode_steps
class S2SGreedySearcher(S2SBaseSearcher):
"""This class implements the general forward-pass of
greedy decoding approach. See also S2SBaseSearcher().
"""
def forward(self, enc_states, wav_len):
"""This method performs a greedy search.
Arguments
---------
enc_states : torch.Tensor
The precomputed encoder states to be used when decoding.
(ex. the encoded speech representation to be attended).
wav_len : torch.Tensor
The speechbrain-style relative length.
"""
enc_lens = torch.round(enc_states.shape[1] * wav_len).int()
device = enc_states.device
batch_size = enc_states.shape[0]
memory = self.reset_mem(batch_size, device=device)
# Using bos as the first input
inp_tokens = (
enc_states.new_zeros(batch_size).fill_(self.bos_index).long()
)
log_probs_lst = []
max_decode_steps = int(enc_states.shape[1] * self.max_decode_ratio)
# the decoding steps can be based on the max number of tokens that a decoder can process (e.g., 448 for Whisper).
_, max_decode_steps = self.change_max_decoding_length(
0, max_decode_steps
)
has_ended = enc_states.new_zeros(batch_size).bool()
for t in range(max_decode_steps):
log_probs, memory, _ = self.forward_step(
inp_tokens, memory, enc_states, enc_lens
)
log_probs_lst.append(log_probs)
inp_tokens = log_probs.argmax(dim=-1)
has_ended = has_ended | (inp_tokens == self.eos_index)
if has_ended.all():
break
log_probs = torch.stack(log_probs_lst, dim=1)
scores, predictions = log_probs.max(dim=-1)
scores = scores.sum(dim=1).tolist()
predictions = batch_filter_seq2seq_output(
predictions, eos_id=self.eos_index
)
return predictions, scores
class S2SWhisperGreedySearch(S2SGreedySearcher):
"""
This class implements the greedy decoding
for Whisper neural nets made by OpenAI in
https://cdn.openai.com/papers/whisper.pdf.
Arguments
---------
model : HuggingFaceWhisper
The Whisper model.
language_token : int
The language token to be used for the decoder input.
bos_token : int
The beginning of sentence token to be used for the decoder input.
task_token : int
The task token to be used for the decoder input.
timestamp_token : int
The timestamp token to be used for the decoder input.
max_length : int
The maximum decoding steps to perform.
The Whisper model has a maximum length of 448.
**kwargs
see S2SBaseSearcher, arguments are directly passed.
"""
def __init__(
self,
model,
language_token=50259,
bos_token=50258,
task_token=50359,
timestamp_token=50363,
max_length=448,
**kwargs,
):
super().__init__(**kwargs)
self.model = model
self.softmax = torch.nn.LogSoftmax(dim=-1)
self.decoder_input_tokens = None
self.language_token = language_token # default language is english
self.bos_token = bos_token # always this value
self.task_token = task_token # default task is transcribe
self.timestamp_token = timestamp_token # default is notimestamp
self.max_length = max_length - 3 # 3 tokens are added to the input
def set_language_token(self, language_token):
"""set the language token to be used for the decoder input."""
self.language_token = language_token
def set_bos_token(self, bos_token):
"""set the bos token to be used for the decoder input."""
self.bos_token = bos_token
def set_task_token(self, task_token):
"""set the task token to be used for the decoder input."""
self.task_token = task_token
def set_timestamp_token(self, timestamp_token):
"""set the timestamp token to be used for the decoder input."""
self.timestamp_token = timestamp_token
# bos_index also needs to be reset, because timestamp_token is the first
# inp_token and needs to come first so that the first input given to
# the model is [bos, language, task, timestamp] (order matters).
self.bos_index = self.timestamp_token
def set_decoder_input_tokens(self, decoder_input_tokens):
"""decoder_input_tokens are the tokens used as input to the decoder.
They are directly taken from the tokenizer.prefix_tokens attribute.
decoder_input_tokens = [bos_token, language_token, task_token, timestamp_token]
"""
self.set_bos_token(decoder_input_tokens[0])
self.set_language_token(decoder_input_tokens[1])
self.set_task_token(decoder_input_tokens[2])
self.set_timestamp_token(decoder_input_tokens[3])
# bos will be timestamp in our case.
self.decoder_input_tokens = [
self.bos_token,
self.language_token,
self.task_token,
]
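# Illustrative example (token ids from the multilingual Whisper tokenizer;
# values may differ between checkpoints): transcribing French without
# timestamps would typically pass
#
#     searcher.set_decoder_input_tokens([50258, 50265, 50359, 50363])
#     # [<|startoftranscript|>, <|fr|>, <|transcribe|>, <|notimestamps|>]
#
# after which the forced prefix fed to the decoder is
# [bos_token, language_token, task_token] and bos_index becomes the
# timestamp token.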
def reset_mem(self, batch_size, device):
"""This method set the first tokens to be decoder_input_tokens during search."""
return torch.tensor([self.decoder_input_tokens] * batch_size).to(device)
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""Performs a step in the implemented beamsearcher."""
memory = _update_mem(inp_tokens, memory)
# WARNING: the max_decode_ratio needs to be under 449 because
# of positional encoding
dec_out, attn = self.model.forward_decoder(enc_states, memory)
log_probs = self.softmax(dec_out[:, -1])
return log_probs, memory, attn
def change_max_decoding_length(self, min_decode_steps, max_decode_steps):
"""set the minimum/maximum length the decoder can take."""
return (
int(self.min_decode_ratio * self.max_length),
int(self.max_decode_ratio * self.max_length),
)
class S2SRNNGreedySearcher(S2SGreedySearcher):
"""
This class implements the greedy decoding
for AttentionalRNNDecoder (speechbrain/nnet/RNN.py).
See also S2SBaseSearcher() and S2SGreedySearcher().
Arguments
---------
embedding : torch.nn.Module
An embedding layer.
decoder : torch.nn.Module
Attentional RNN decoder.
linear : torch.nn.Module
A linear output layer.
**kwargs
see S2SBaseSearcher, arguments are directly passed.
Example
-------
>>> emb = torch.nn.Embedding(5, 3)
>>> dec = sb.nnet.RNN.AttentionalRNNDecoder(
... "gru", "content", 3, 3, 1, enc_dim=7, input_size=3
... )
>>> lin = sb.nnet.linear.Linear(n_neurons=5, input_size=3)
>>> searcher = S2SRNNGreedySearcher(
... embedding=emb,
... decoder=dec,
... linear=lin,
... bos_index=4,
... eos_index=4,
... min_decode_ratio=0,
... max_decode_ratio=1,
... )
>>> enc = torch.rand([2, 6, 7])
>>> wav_len = torch.rand([2])
>>> hyps, scores = searcher(enc, wav_len)
"""
def __init__(self, embedding, decoder, linear, **kwargs):
super(S2SRNNGreedySearcher, self).__init__(**kwargs)
self.emb = embedding
self.dec = decoder
self.fc = linear
self.softmax = torch.nn.LogSoftmax(dim=-1)
def reset_mem(self, batch_size, device):
"""When doing greedy search, keep hidden state (hs) adn context vector (c)
as memory.
"""
hs = None
self.dec.attn.reset()
c = torch.zeros(batch_size, self.dec.attn_dim, device=device)
return hs, c
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""Performs a step in the implemented beamsearcher."""
hs, c = memory
e = self.emb(inp_tokens)
dec_out, hs, c, w = self.dec.forward_step(
e, hs, c, enc_states, enc_lens
)
log_probs = self.softmax(self.fc(dec_out))
return log_probs, (hs, c), w
class S2SBeamSearcher(S2SBaseSearcher):
"""This class implements the beam-search algorithm for the seq2seq model.
See also S2SBaseSearcher().
Arguments
---------
bos_index : int
The index of beginning-of-sequence token.
eos_index : int
The index of end-of-sequence token.
min_decode_ratio : float
The ratio of minimum decoding steps to length of encoder states.
max_decode_ratio : float
The ratio of maximum decoding steps to length of encoder states.
beam_size : int
The width of beam.
topk : int
The number of hypothesis to return. (default: 1)
return_log_probs : bool
Whether to return log-probabilities. (default: False)
using_eos_threshold : bool
Whether to use eos threshold. (default: true)
eos_threshold : float
The threshold coefficient for eos token (default: 1.5). See 3.1.2 in
reference: https://arxiv.org/abs/1904.02619
length_normalization : bool
Whether to divide the scores by the length. (default: True)
length_rewarding : float
The coefficient of length rewarding (γ).
log P(y|x) + λ log P_LM(y) + γ*len(y). (default: 0.0)
coverage_penalty: float
The coefficient of coverage penalty (η).
log P(y|x) + λ log P_LM(y) + γ*len(y) + η*coverage(x,y). (default: 0.0)
Reference: https://arxiv.org/pdf/1612.02695.pdf, https://arxiv.org/pdf/1808.10792.pdf
lm_weight : float
The weight of LM when performing beam search (λ).
log P(y|x) + λ log P_LM(y). (default: 0.0)
ctc_weight : float
The weight of CTC probabilities when performing beam search (λ).
(1-λ) log P(y|x) + λ log P_CTC(y|x). (default: 0.0)
blank_index : int
The index of the blank token.
ctc_score_mode: str
Default: "full"
CTC prefix scoring on "partial" tokens or "full" tokens.
ctc_window_size: int
Default: 0
Compute the ctc scores over the time frames using windowing based on attention peaks.
If 0, no windowing applied.
using_max_attn_shift: bool
Whether using the max_attn_shift constraint. (default: False)
max_attn_shift: int
Beam search will block the beams that attention shift more
than max_attn_shift.
Reference: https://arxiv.org/abs/1904.02619
minus_inf : float
Default: -1e20
The value of minus infinity to block some path
of the search.
"""
def __init__(
self,
bos_index,
eos_index,
min_decode_ratio,
max_decode_ratio,
beam_size,
topk=1,
return_log_probs=False,
using_eos_threshold=True,
eos_threshold=1.5,
length_normalization=True,
length_rewarding=0,
coverage_penalty=0.0,
lm_weight=0.0,
lm_modules=None,
ctc_weight=0.0,
blank_index=0,
ctc_score_mode="full",
ctc_window_size=0,
using_max_attn_shift=False,
max_attn_shift=60,
minus_inf=-1e20,
):
super(S2SBeamSearcher, self).__init__(
bos_index, eos_index, min_decode_ratio, max_decode_ratio,
)
self.beam_size = beam_size
self.topk = topk
self.return_log_probs = return_log_probs
self.length_normalization = length_normalization
self.length_rewarding = length_rewarding
self.coverage_penalty = coverage_penalty
self.coverage = None
if self.length_normalization and self.length_rewarding > 0:
raise ValueError(
"length normalization is not compatible with length rewarding."
)
self.using_eos_threshold = using_eos_threshold
self.eos_threshold = eos_threshold
self.using_max_attn_shift = using_max_attn_shift
self.max_attn_shift = max_attn_shift
self.lm_weight = lm_weight
self.lm_modules = lm_modules
# ctc related
self.ctc_weight = ctc_weight
self.blank_index = blank_index
self.att_weight = 1.0 - ctc_weight
assert (
0.0 <= self.ctc_weight <= 1.0
), "ctc_weight should not > 1.0 and < 0.0"
if self.ctc_weight > 0.0:
if len({self.bos_index, self.eos_index, self.blank_index}) < 3:
raise ValueError(
"To perform joint ATT/CTC decoding, set blank, eos and bos to different indexes."
)
# ctc already initialized
self.minus_inf = minus_inf
self.ctc_score_mode = ctc_score_mode
self.ctc_window_size = ctc_window_size
def _check_full_beams(self, hyps, beam_size):
"""This method checks whether hyps has been full.
Arguments
---------
hyps : List
This list contains batch_size lists.
Each inner list stores all the hypotheses for this sentence.
beam_size : int
The number of beam_size.
Returns
-------
bool
Whether hyps is full.
"""
hyps_len = [len(lst) for lst in hyps]
beam_size = [self.beam_size for _ in range(len(hyps_len))]
if hyps_len == beam_size:
return True
else:
return False
def _check_attn_shift(self, attn, prev_attn_peak):
"""This method checks whether attention shift is more than attn_shift.
Arguments
---------
attn : torch.Tensor
The attention to be checked.
prev_attn_peak : torch.Tensor
The previous attention peak place.
Returns
-------
cond : torch.BoolTensor
Each element represents whether the beam is within the max_shift range.
attn_peak : torch.Tensor
The peak of the attn tensor.
"""
# Block the candidates that exceed the max shift
_, attn_peak = torch.max(attn, dim=1)
lt_cond = attn_peak <= (prev_attn_peak + self.max_attn_shift)
mt_cond = attn_peak > (prev_attn_peak - self.max_attn_shift)
# True if not exceed limit
# Multiplication equals to element-wise and for tensor
cond = (lt_cond * mt_cond).unsqueeze(1)
return cond, attn_peak
def _check_eos_threshold(self, log_probs):
"""
This method checks whether eos log-probabilities exceed threshold.
Arguments
---------
log_probs : torch.Tensor
The log-probabilities.
Return
------
cond : torch.BoolTensor
Each element represents whether the eos log-probabilities will be kept.
"""
max_probs, _ = torch.max(log_probs, dim=-1)
eos_probs = log_probs[:, self.eos_index]
cond = eos_probs > (self.eos_threshold * max_probs)
return cond
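# Worked example (illustrative log-probabilities): with eos_threshold == 1.5,
# a beam whose best token has log-prob -0.4 only keeps eos as a candidate if
# the eos log-prob exceeds 1.5 * -0.4 == -0.6; an eos log-prob of -2.3 would
# therefore be masked to minus_inf by the caller.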
def _update_hyp_and_scores(
self,
inp_tokens,
alived_seq,
alived_log_probs,
hyps_and_scores,
scores,
timesteps,
):
"""This method will update hyps and scores if inp_tokens are eos.
Arguments
---------
inp_tokens : torch.Tensor
The current output.
alived_seq : torch.Tensor
The tensor to store the alived_seq.
alived_log_probs : torch.Tensor
The tensor to store the alived_log_probs.
hyps_and_scores : list
To store generated hypotheses and scores.
scores : torch.Tensor
The final scores of beam search.
timesteps : float
The current timesteps. This is for length rewarding.
Returns
-------
is_eos : torch.BoolTensor
Each element represents whether the token is eos.
"""
is_eos = inp_tokens.eq(self.eos_index)
(eos_indices,) = torch.nonzero(is_eos, as_tuple=True)
# Store the hypothesis and their scores when reaching eos.
if eos_indices.shape[0] > 0:
for index in eos_indices:
# convert to int
index = index.item()
batch_id = torch.div(
index, self.beam_size, rounding_mode="floor"
)
if len(hyps_and_scores[batch_id]) == self.beam_size:
continue
hyp = alived_seq[index, :]
log_probs = alived_log_probs[index, :]
final_scores = scores[index] + self.length_rewarding * (
timesteps + 1
)
hyps_and_scores[batch_id].append((hyp, log_probs, final_scores))
return is_eos
def _get_top_score_prediction(self, hyps_and_scores, topk):
"""This method sorts the scores and return corresponding hypothesis and log probs.
Arguments
---------
hyps_and_scores : list
To store generated hypotheses and scores.
topk : int
Number of hypothesis to return.
Returns
-------
topk_hyps : torch.Tensor (batch, topk, max length of token_id sequences)
This tensor stores the topk predicted hypothesis.
topk_scores : torch.Tensor (batch, topk)
This tensor contains the final scores of topk hypotheses.
topk_lengths : torch.Tensor (batch, topk)
The length of each topk sequence in the batch.
topk_log_probs : list
The log probabilities of each hypotheses.
"""
top_hyps, top_log_probs, top_scores, top_lengths = [], [], [], []
batch_size = len(hyps_and_scores)
# Collect hypotheses
for i in range(len(hyps_and_scores)):
hyps, log_probs, scores = zip(*hyps_and_scores[i])
top_hyps += hyps
top_scores += scores
top_log_probs += log_probs
top_lengths += [len(hyp) for hyp in hyps]
top_hyps = torch.nn.utils.rnn.pad_sequence(
top_hyps, batch_first=True, padding_value=0
)
top_scores = torch.stack((top_scores), dim=0).view(batch_size, -1)
top_lengths = torch.tensor(
top_lengths, dtype=torch.int, device=top_scores.device
)
# Get topk indices
topk_scores, indices = top_scores.topk(self.topk, dim=-1)
indices = (indices + self.beam_offset.unsqueeze(1)).view(
batch_size * self.topk
)
# Select topk hypotheses
topk_hyps = torch.index_select(top_hyps, dim=0, index=indices,)
topk_hyps = topk_hyps.view(batch_size, self.topk, -1)
topk_lengths = torch.index_select(top_lengths, dim=0, index=indices,)
topk_lengths = topk_lengths.view(batch_size, self.topk)
topk_log_probs = [top_log_probs[index.item()] for index in indices]
return topk_hyps, topk_scores, topk_lengths, topk_log_probs
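# Worked example (illustrative sizes): with batch_size == 2, beam_size == 3 and
# topk == 2, ``top_scores`` is reshaped to (2, 3) (one row of finished
# hypotheses per utterance); ``topk`` returns 2 in-row indices per utterance,
# and adding ``self.beam_offset`` (here [0, 3]) turns them into indices into
# the flat list of 6 hypotheses, from which ``index_select`` gathers the
# final (2, 2, max_len) ``topk_hyps`` tensor.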
def forward(self, enc_states, wav_len): # noqa: C901
"""Applies beamsearch and returns the predicted tokens."""
enc_lens = torch.round(enc_states.shape[1] * wav_len).int()
device = enc_states.device
batch_size = enc_states.shape[0]
memory = self.reset_mem(batch_size * self.beam_size, device=device)
if self.lm_weight > 0:
lm_memory = self.reset_lm_mem(batch_size * self.beam_size, device)
if self.ctc_weight > 0:
# (batch_size * beam_size, L, vocab_size)
ctc_outputs = self.ctc_forward_step(enc_states)
ctc_scorer = CTCPrefixScorer(
ctc_outputs,
enc_lens,
batch_size,
self.beam_size,
self.blank_index,
self.eos_index,
self.ctc_window_size,
)
ctc_memory = None
# Inflate the enc_states and enc_len by beam_size times
enc_states = inflate_tensor(enc_states, times=self.beam_size, dim=0)
enc_lens = inflate_tensor(enc_lens, times=self.beam_size, dim=0)
# Using bos as the first input
inp_tokens = (
torch.zeros(batch_size * self.beam_size, device=device)
.fill_(self.bos_index)
.long()
)
# The first index of each sentence.
self.beam_offset = (
torch.arange(batch_size, device=device) * self.beam_size
)
# initialize sequence scores variables.
sequence_scores = torch.empty(
batch_size * self.beam_size, device=device
)
sequence_scores.fill_(float("-inf"))
# keep only the first to make sure no redundancy.
sequence_scores.index_fill_(0, self.beam_offset, 0.0)
# keep the hypothesis that reaches eos and their corresponding score and log_probs.
hyps_and_scores = [[] for _ in range(batch_size)]
# keep the sequences that still not reaches eos.
alived_seq = torch.empty(
batch_size * self.beam_size, 0, device=device
).long()
# Keep the log-probabilities of alived sequences.
alived_log_probs = torch.empty(
batch_size * self.beam_size, 0, device=device
)
min_decode_steps = int(enc_states.shape[1] * self.min_decode_ratio)
max_decode_steps = int(enc_states.shape[1] * self.max_decode_ratio)
# the decoding steps can be based on the max number of tokens that a decoder can process (e.g., 448 for Whisper).
min_decode_steps, max_decode_steps = self.change_max_decoding_length(
min_decode_steps, max_decode_steps
)
# Initialize the previous attention peak to zero
# This variable will be used when using_max_attn_shift=True
prev_attn_peak = torch.zeros(batch_size * self.beam_size, device=device)
for t in range(max_decode_steps):
# terminate condition
if self._check_full_beams(hyps_and_scores, self.beam_size):
break
log_probs, memory, attn = self.forward_step(
inp_tokens, memory, enc_states, enc_lens
)
log_probs = self.att_weight * log_probs
# Keep the original value
log_probs_clone = log_probs.clone().reshape(batch_size, -1)
vocab_size = log_probs.shape[-1]
if self.using_max_attn_shift:
# Block the candidates that exceed the max shift
cond, attn_peak = self._check_attn_shift(attn, prev_attn_peak)
log_probs = mask_by_condition(
log_probs, cond, fill_value=self.minus_inf
)
prev_attn_peak = attn_peak
# Set eos to minus_inf when less than minimum steps.
if t < min_decode_steps:
log_probs[:, self.eos_index] = self.minus_inf
# Set the eos prob to minus_inf when it doesn't exceed threshold.
if self.using_eos_threshold:
cond = self._check_eos_threshold(log_probs)
log_probs[:, self.eos_index] = mask_by_condition(
log_probs[:, self.eos_index],
cond,
fill_value=self.minus_inf,
)
# adding LM scores to log_prob if lm_weight > 0
if self.lm_weight > 0:
lm_log_probs, lm_memory = self.lm_forward_step(
inp_tokens, lm_memory
)
log_probs = log_probs + self.lm_weight * lm_log_probs
# adding CTC scores to log_prob if ctc_weight > 0
if self.ctc_weight > 0:
g = alived_seq
# block blank token
log_probs[:, self.blank_index] = self.minus_inf
if self.ctc_weight != 1.0 and self.ctc_score_mode == "partial":
# pruning vocab for ctc_scorer
_, ctc_candidates = log_probs.topk(
self.beam_size * 2, dim=-1
)
else:
ctc_candidates = None
ctc_log_probs, ctc_memory = ctc_scorer.forward_step(
g, ctc_memory, ctc_candidates, attn
)
log_probs = log_probs + self.ctc_weight * ctc_log_probs
scores = sequence_scores.unsqueeze(1).expand(-1, vocab_size)
scores = scores + log_probs
# length normalization
if self.length_normalization:
scores = scores / (t + 1)
# keep topk beams
scores, candidates = scores.view(batch_size, -1).topk(
self.beam_size, dim=-1
)
# The input for the next step, also the output of current step.
inp_tokens = (candidates % vocab_size).view(
batch_size * self.beam_size
)
scores = scores.view(batch_size * self.beam_size)
sequence_scores = scores
# recover the length normalization
if self.length_normalization:
sequence_scores = sequence_scores * (t + 1)
# The index of which beam the current top-K output came from in (t-1) timesteps.
predecessors = (
torch.div(candidates, vocab_size, rounding_mode="floor")
+ self.beam_offset.unsqueeze(1).expand_as(candidates)
).view(batch_size * self.beam_size)
# Permute the memory to synchronize with the output.
memory = self.permute_mem(memory, index=predecessors)
if self.lm_weight > 0:
lm_memory = self.permute_lm_mem(lm_memory, index=predecessors)
if self.ctc_weight > 0:
ctc_memory = ctc_scorer.permute_mem(ctc_memory, candidates)
# If using_max_attn_shift, then the previous attn peak has to be permuted too.
if self.using_max_attn_shift:
prev_attn_peak = torch.index_select(
prev_attn_peak, dim=0, index=predecessors
)
# Add coverage penalty
if self.coverage_penalty > 0:
cur_attn = torch.index_select(attn, dim=0, index=predecessors)
# coverage: cumulative attention probability vector
if t == 0:
# Init coverage
self.coverage = cur_attn
# the attn of transformer is [batch_size*beam_size, current_step, source_len]
if len(cur_attn.size()) > 2:
self.coverage = torch.sum(cur_attn, dim=1)
else:
# Update coverage
self.coverage = torch.index_select(
self.coverage, dim=0, index=predecessors
)
self.coverage = self.coverage + cur_attn
# Compute coverage penalty and add it to scores
penalty = torch.max(
self.coverage, self.coverage.clone().fill_(0.5)
).sum(-1)
penalty = penalty - self.coverage.size(-1) * 0.5
penalty = penalty.view(batch_size * self.beam_size)
penalty = (
penalty / (t + 1) if self.length_normalization else penalty
)
scores = scores - penalty * self.coverage_penalty
# Update alived_seq
alived_seq = torch.cat(
[
torch.index_select(alived_seq, dim=0, index=predecessors),
inp_tokens.unsqueeze(1),
],
dim=-1,
)
# Takes the log-probabilities
beam_log_probs = log_probs_clone[
torch.arange(batch_size).unsqueeze(1), candidates
].reshape(batch_size * self.beam_size)
alived_log_probs = torch.cat(
[
torch.index_select(
alived_log_probs, dim=0, index=predecessors
),
beam_log_probs.unsqueeze(1),
],
dim=-1,
)
is_eos = self._update_hyp_and_scores(
inp_tokens,
alived_seq,
alived_log_probs,
hyps_and_scores,
scores,
timesteps=t,
)
# Block the paths that have reached eos.
sequence_scores.masked_fill_(is_eos, float("-inf"))
if not self._check_full_beams(hyps_and_scores, self.beam_size):
# Using all eos to fill-up the hyps.
eos = (
torch.zeros(batch_size * self.beam_size, device=device)
.fill_(self.eos_index)
.long()
)
_ = self._update_hyp_and_scores(
eos,
alived_seq,
alived_log_probs,
hyps_and_scores,
scores,
timesteps=max_decode_steps,
)
(
topk_hyps,
topk_scores,
topk_lengths,
log_probs,
) = self._get_top_score_prediction(hyps_and_scores, topk=self.topk,)
# pick the best hyp
predictions = topk_hyps[:, 0, :]
predictions = batch_filter_seq2seq_output(
predictions, eos_id=self.eos_index
)
if self.return_log_probs:
return predictions, topk_scores, log_probs
else:
return predictions, topk_scores
def ctc_forward_step(self, x):
"""Applies a ctc step during bramsearch."""
logits = self.ctc_fc(x)
log_probs = self.softmax(logits)
return log_probs
def permute_mem(self, memory, index):
"""This method permutes the seq2seq model memory
to synchronize the memory index with the current output.
Arguments
---------
memory : No limit
The memory variable to be permuted.
index : torch.Tensor
The index of the previous path.
Return
------
The variable of the memory being permuted.
"""
raise NotImplementedError
def permute_lm_mem(self, memory, index):
"""This method permutes the language model memory
to synchronize the memory index with the current output.
Arguments
---------
memory : No limit
The memory variable to be permuted.
index : torch.Tensor
The index of the previous path.
Returns
-------
The variable of the memory being permuted.
"""
raise NotImplementedError
class S2SRNNBeamSearcher(S2SBeamSearcher):
"""
This class implements the beam search decoding
for AttentionalRNNDecoder (speechbrain/nnet/RNN.py).
See also S2SBaseSearcher(), S2SBeamSearcher().
Arguments
---------
embedding : torch.nn.Module
An embedding layer.
decoder : torch.nn.Module
Attentional RNN decoder.
linear : torch.nn.Module
A linear output layer.
temperature : float
Temperature factor applied to softmax. It changes the probability
distribution, being softer when T>1 and sharper with T<1.
**kwargs
see S2SBeamSearcher, arguments are directly passed.
Example
-------
>>> emb = torch.nn.Embedding(5, 3)
>>> dec = sb.nnet.RNN.AttentionalRNNDecoder(
... "gru", "content", 3, 3, 1, enc_dim=7, input_size=3
... )
>>> lin = sb.nnet.linear.Linear(n_neurons=5, input_size=3)
>>> ctc_lin = sb.nnet.linear.Linear(n_neurons=5, input_size=7)
>>> searcher = S2SRNNBeamSearcher(
... embedding=emb,
... decoder=dec,
... linear=lin,
... ctc_linear=ctc_lin,
... bos_index=4,
... eos_index=4,
... blank_index=4,
... min_decode_ratio=0,
... max_decode_ratio=1,
... beam_size=2,
... )
>>> enc = torch.rand([2, 6, 7])
>>> wav_len = torch.rand([2])
>>> hyps, scores = searcher(enc, wav_len)
"""
def __init__(
self,
embedding,
decoder,
linear,
ctc_linear=None,
temperature=1.0,
**kwargs,
):
super(S2SRNNBeamSearcher, self).__init__(**kwargs)
self.emb = embedding
self.dec = decoder
self.fc = linear
self.ctc_fc = ctc_linear
if self.ctc_weight > 0.0 and self.ctc_fc is None:
raise ValueError(
"To perform joint ATT/CTC decoding, ctc_fc is required."
)
self.softmax = torch.nn.LogSoftmax(dim=-1)
self.temperature = temperature
def reset_mem(self, batch_size, device):
"""Needed to reset the memory during beamsearch."""
hs = None
self.dec.attn.reset()
c = torch.zeros(batch_size, self.dec.attn_dim, device=device)
return hs, c
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""Performs a step in the implemented beamsearcher."""
with torch.no_grad():
hs, c = memory
e = self.emb(inp_tokens)
dec_out, hs, c, w = self.dec.forward_step(
e, hs, c, enc_states, enc_lens
)
log_probs = self.softmax(self.fc(dec_out) / self.temperature)
# average attn weight of heads when attn_type is multiheadlocation
if self.dec.attn_type == "multiheadlocation":
w = torch.mean(w, dim=1)
return log_probs, (hs, c), w
def permute_mem(self, memory, index):
"""Memory permutation during beamsearch."""
hs, c = memory
# shape of hs: [num_layers, batch_size, n_neurons]
if isinstance(hs, tuple):
hs_0 = torch.index_select(hs[0], dim=1, index=index)
hs_1 = torch.index_select(hs[1], dim=1, index=index)
hs = (hs_0, hs_1)
else:
hs = torch.index_select(hs, dim=1, index=index)
c = torch.index_select(c, dim=0, index=index)
if self.dec.attn_type == "location":
self.dec.attn.prev_attn = torch.index_select(
self.dec.attn.prev_attn, dim=0, index=index
)
return (hs, c)
class S2SRNNBeamSearchLM(S2SRNNBeamSearcher):
"""This class implements the beam search decoding
for AttentionalRNNDecoder (speechbrain/nnet/RNN.py) with LM.
See also S2SBaseSearcher(), S2SBeamSearcher(), S2SRNNBeamSearcher().
Arguments
---------
embedding : torch.nn.Module
An embedding layer.
decoder : torch.nn.Module
Attentional RNN decoder.
linear : torch.nn.Module
A linear output layer.
language_model : torch.nn.Module
A language model.
temperature_lm : float
Temperature factor applied to softmax. It changes the probability
distribution, being softer when T>1 and sharper with T<1.
**kwargs
Arguments to pass to S2SBeamSearcher.
Example
-------
>>> from speechbrain.lobes.models.RNNLM import RNNLM
>>> emb = torch.nn.Embedding(5, 3)
>>> dec = sb.nnet.RNN.AttentionalRNNDecoder(
... "gru", "content", 3, 3, 1, enc_dim=7, input_size=3
... )
>>> lin = sb.nnet.linear.Linear(n_neurons=5, input_size=3)
>>> lm = RNNLM(output_neurons=5, return_hidden=True)
>>> searcher = S2SRNNBeamSearchLM(
... embedding=emb,
... decoder=dec,
... linear=lin,
... language_model=lm,
... bos_index=4,
... eos_index=4,
... blank_index=4,
... min_decode_ratio=0,
... max_decode_ratio=1,
... beam_size=2,
... lm_weight=0.5,
... )
>>> enc = torch.rand([2, 6, 7])
>>> wav_len = torch.rand([2])
>>> hyps, scores = searcher(enc, wav_len)
"""
def __init__(
self,
embedding,
decoder,
linear,
language_model,
temperature_lm=1.0,
**kwargs,
):
super(S2SRNNBeamSearchLM, self).__init__(
embedding, decoder, linear, **kwargs
)
self.lm = language_model
self.lm.eval()
self.log_softmax = sb.nnet.activations.Softmax(apply_log=True)
self.temperature_lm = temperature_lm
def lm_forward_step(self, inp_tokens, memory):
"""Applies a step to the LM during beamsearch."""
with torch.no_grad():
logits, hs = self.lm(inp_tokens, hx=memory)
log_probs = self.log_softmax(logits / self.temperature_lm)
return log_probs, hs
def permute_lm_mem(self, memory, index):
"""This is to permute lm memory to synchronize with current index
during beam search. The order of beams will be shuffled by scores
every timestep to allow batched beam search.
Further details please refer to speechbrain/decoder/seq2seq.py.
"""
if isinstance(memory, tuple):
memory_0 = torch.index_select(memory[0], dim=1, index=index)
memory_1 = torch.index_select(memory[1], dim=1, index=index)
memory = (memory_0, memory_1)
else:
memory = torch.index_select(memory, dim=1, index=index)
return memory
def reset_lm_mem(self, batch_size, device):
"""Needed to reset the LM memory during beamsearch."""
# set hidden_state=None, pytorch RNN will automatically set it to
# zero vectors.
return None
class S2SRNNBeamSearchTransformerLM(S2SRNNBeamSearcher):
"""This class implements the beam search decoding
for AttentionalRNNDecoder (speechbrain/nnet/RNN.py) with LM.
See also S2SBaseSearcher(), S2SBeamSearcher(), S2SRNNBeamSearcher().
Arguments
---------
embedding : torch.nn.Module
An embedding layer.
decoder : torch.nn.Module
Attentional RNN decoder.
linear : torch.nn.Module
A linear output layer.
language_model : torch.nn.Module
A language model.
temperature_lm : float
Temperature factor applied to softmax. It changes the probability
distribution, being softer when T>1 and sharper with T<1.
**kwargs
Arguments to pass to S2SBeamSearcher.
Example
-------
>>> from speechbrain.lobes.models.transformer.TransformerLM import TransformerLM
>>> emb = torch.nn.Embedding(5, 3)
>>> dec = sb.nnet.RNN.AttentionalRNNDecoder(
... "gru", "content", 3, 3, 1, enc_dim=7, input_size=3
... )
>>> lin = sb.nnet.linear.Linear(n_neurons=5, input_size=3)
>>> lm = TransformerLM(5, 512, 8, 1, 0, 1024, activation=torch.nn.GELU)
>>> searcher = S2SRNNBeamSearchTransformerLM(
... embedding=emb,
... decoder=dec,
... linear=lin,
... language_model=lm,
... bos_index=4,
... eos_index=4,
... blank_index=4,
... min_decode_ratio=0,
... max_decode_ratio=1,
... beam_size=2,
... lm_weight=0.5,
... )
>>> enc = torch.rand([2, 6, 7])
>>> wav_len = torch.rand([2])
>>> hyps, scores = searcher(enc, wav_len)
"""
def __init__(
self,
embedding,
decoder,
linear,
language_model,
temperature_lm=1.0,
**kwargs,
):
super(S2SRNNBeamSearchTransformerLM, self).__init__(
embedding, decoder, linear, **kwargs
)
self.lm = language_model
self.lm.eval()
self.log_softmax = sb.nnet.activations.Softmax(apply_log=True)
self.temperature_lm = temperature_lm
def lm_forward_step(self, inp_tokens, memory):
"""Performs a step in the LM during beamsearch."""
memory = _update_mem(inp_tokens, memory)
if not next(self.lm.parameters()).is_cuda:
self.lm.to(inp_tokens.device)
logits = self.lm(memory)
log_probs = self.softmax(logits / self.temperature_lm)
return log_probs[:, -1, :], memory
def permute_lm_mem(self, memory, index):
"""Permutes the LM ,emory during beamsearch"""
memory = torch.index_select(memory, dim=0, index=index)
return memory
def reset_lm_mem(self, batch_size, device):
"""Needed to reset the LM memory during beamsearch"""
# set hidden_state=None, pytorch RNN will automatically set it to
# zero vectors.
return None
class S2STransformerBeamSearch(S2SBeamSearcher):
"""This class implements the beam search decoding
for Transformer.
See also S2SBaseSearcher(), S2SBeamSearcher().
Arguments
---------
model : torch.nn.Module
The model to use for decoding.
linear : torch.nn.Module
A linear output layer.
**kwargs
Arguments to pass to S2SBeamSearcher
Example
-------
>>> # see recipes/LibriSpeech/ASR_transformer/experiment.py
"""
def __init__(
self, modules, temperature=1.0, temperature_lm=1.0, **kwargs,
):
super(S2STransformerBeamSearch, self).__init__(**kwargs)
self.model = modules[0]
self.fc = modules[1]
self.ctc_fc = modules[2]
self.softmax = torch.nn.LogSoftmax(dim=-1)
self.temperature = temperature
self.temperature_lm = temperature_lm
def reset_mem(self, batch_size, device):
"""Needed to reset the memory during beamsearch."""
return None
def reset_lm_mem(self, batch_size, device):
"""Needed to reset the LM memory during beamsearch."""
return None
def permute_mem(self, memory, index):
"""Permutes the memory."""
memory = torch.index_select(memory, dim=0, index=index)
return memory
def permute_lm_mem(self, memory, index):
"""Permutes the memory of the language model."""
memory = torch.index_select(memory, dim=0, index=index)
return memory
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""Performs a step in the implemented beamsearcher."""
memory = _update_mem(inp_tokens, memory)
pred, attn = self.model.decode(memory, enc_states)
prob_dist = self.softmax(self.fc(pred) / self.temperature)
return prob_dist[:, -1, :], memory, attn
def lm_forward_step(self, inp_tokens, memory):
"""Performs a step in the implemented LM module."""
memory = _update_mem(inp_tokens, memory)
if not next(self.lm_modules.parameters()).is_cuda:
self.lm_modules.to(inp_tokens.device)
logits = self.lm_modules(memory)
log_probs = self.softmax(logits / self.temperature_lm)
return log_probs[:, -1, :], memory
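def _example_transformer_beam_search():
    """Illustrative sketch only, not taken from the original SpeechBrain recipes.
    It shows the wiring expected by S2STransformerBeamSearch: the searcher only
    needs a model exposing decode(memory, enc_states) plus two output linear
    layers. `_ToyDecoder` below is a hypothetical stand-in for a real
    TransformerASR model; see recipes/LibriSpeech/ASR_transformer for real usage.
    """
    import torch

    class _ToyDecoder(torch.nn.Module):
        def __init__(self, vocab_size=6, d_model=8):
            super().__init__()
            self.emb = torch.nn.Embedding(vocab_size, d_model)

        def decode(self, memory, enc_states):
            # Decoder states for every target position plus a dummy attention map.
            dec = self.emb(memory.long()) + enc_states.mean(dim=1, keepdim=True)
            attn = torch.ones(memory.size(0), enc_states.size(1))
            return dec, attn

    vocab_size, d_model = 6, 8
    model = _ToyDecoder(vocab_size, d_model)
    seq_lin = torch.nn.Linear(d_model, vocab_size)
    ctc_lin = torch.nn.Linear(d_model, vocab_size)
    searcher = S2STransformerBeamSearch(
        modules=[model, seq_lin, ctc_lin],
        bos_index=4,
        eos_index=5,
        blank_index=0,
        min_decode_ratio=0.0,
        max_decode_ratio=1.0,
        beam_size=2,
    )
    enc_states = torch.rand(2, 7, d_model)
    wav_len = torch.ones(2)
    hyps, scores = searcher(enc_states, wav_len)
    return hyps, scores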
class S2SWhisperBeamSearch(S2SBeamSearcher):
"""This class implements the beam search decoding
for Whisper neural nets made by OpenAI in
https://cdn.openai.com/papers/whisper.pdf.
Arguments
---------
module : list
A list containing the following modules:
model : torch.nn.Module
A whisper model exposing a forward_decoder() method.
ctc_lin : torch.nn.Module (optional)
A linear output layer for CTC.
language_token : int
The token to use for language.
bos_token : int
The token to use for beginning of sentence.
task_token : int
The token to use for task.
timestamp_token : int
The token to use for timestamp.
max_length : int
The maximum decoding steps to perform.
The Whisper model has a maximum length of 448.
**kwargs
Arguments to pass to S2SBeamSearcher
"""
def __init__(
self,
module,
temperature=1.0,
temperature_lm=1.0,
language_token=50259,
bos_token=50258,
task_token=50359,
timestamp_token=50363,
max_length=447,
**kwargs,
):
super(S2SWhisperBeamSearch, self).__init__(**kwargs)
self.model = module[0]
if len(module) == 2:
self.ctc_fc = module[1]
self.softmax = torch.nn.LogSoftmax(dim=-1)
self.temperature = temperature
self.temperature_lm = temperature_lm
self.decoder_input_tokens = None
self.language_token = language_token # default language is english
self.bos_token = bos_token # always this value
self.task_token = task_token # default task is transcribe
self.timestamp_token = timestamp_token # default is notimestamp
self.max_length = max_length - 3 # -3 for [bos, language, task]
def set_language_token(self, language_token):
"""set the language token to use for the decoder input."""
self.language_token = language_token
def set_bos_token(self, bos_token):
"""set the bos token to use for the decoder input."""
self.bos_token = bos_token
def set_task_token(self, task_token):
"""set the task token to use for the decoder input."""
self.task_token = task_token
def set_timestamp_token(self, timestamp_token):
"""set the timestamp token to use for the decoder input."""
self.timestamp_token = timestamp_token
# bos_index must also be reset, since timestamp_token is the first
# inp_token and must come first so that the first input given
# to the model is [bos, language, task, timestamp] (order matters).
self.bos_index = self.timestamp_token
def change_max_decoding_length(self, min_decode_steps, max_decode_steps):
"""set the minimum/maximum length the decoder can take."""
return (
int(self.min_decode_ratio * self.max_length),
int(self.max_decode_ratio * self.max_length),
)
def set_decoder_input_tokens(self, decoder_input_tokens):
"""decoder_input_tokens are the tokens used as input to the decoder.
They are directly taken from the tokenizer.prefix_tokens attribute.
decoder_input_tokens = [bos_token, language_token, task_token, timestamp_token]
"""
self.set_bos_token(decoder_input_tokens[0])
self.set_language_token(decoder_input_tokens[1])
self.set_task_token(decoder_input_tokens[2])
self.set_timestamp_token(decoder_input_tokens[3])
# bos will be timestamp in our case.
self.decoder_input_tokens = [
self.bos_token,
self.language_token,
self.task_token,
]
def reset_mem(self, batch_size, device):
"""This method set the first tokens to be decoder_input_tokens during search."""
return torch.tensor([self.decoder_input_tokens] * batch_size).to(device)
def reset_lm_mem(self, batch_size, device):
"""Needed to reset the LM memory during beamsearch."""
return None
def permute_mem(self, memory, index):
"""Permutes the memory."""
memory = torch.index_select(memory, dim=0, index=index)
return memory
def permute_lm_mem(self, memory, index):
"""Permutes the memory of the language model."""
memory = torch.index_select(memory, dim=0, index=index)
return memory
def forward_step(self, inp_tokens, memory, enc_states, enc_lens):
"""Performs a step in the implemented beamsearcher."""
memory = _update_mem(inp_tokens, memory)
dec_out, attn = self.model.forward_decoder(enc_states, memory)
log_probs = self.softmax(dec_out[:, -1])
return log_probs, memory, attn
def lm_forward_step(self, inp_tokens, memory):
"""Performs a step in the implemented LM module."""
memory = _update_mem(inp_tokens, memory)
if not next(self.lm_modules.parameters()).is_cuda:
self.lm_modules.to(inp_tokens.device)
logits = self.lm_modules(memory)
log_probs = self.softmax(logits / self.temperature_lm)
return log_probs[:, -1, :], memory
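# Illustrative wiring for S2SWhisperBeamSearch (a hedged sketch, not taken from
# the original recipes). It assumes a SpeechBrain Whisper wrapper exposing
# forward_decoder() and a tokenizer exposing prefix_tokens; the special-token
# ids below are placeholders and should come from your actual tokenizer.
#
#     searcher = S2SWhisperBeamSearch(
#         module=[whisper_model],
#         bos_index=50363,       # overridden by set_decoder_input_tokens()
#         eos_index=50257,       # assumed end-of-text id, verify with tokenizer
#         min_decode_ratio=0.0,
#         max_decode_ratio=1.0,
#         beam_size=8,
#     )
#     searcher.set_decoder_input_tokens(tokenizer.prefix_tokens)
#     hyps, scores = searcher(enc_states, wav_lens)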
def batch_filter_seq2seq_output(prediction, eos_id=-1):
"""Calling batch_size times of filter_seq2seq_output.
Arguments
---------
prediction : list of torch.Tensor
A list containing the output ints predicted by the seq2seq system.
eos_id : int, string
The id of the eos.
Returns
-------
list
The output predicted by seq2seq model.
Example
-------
>>> predictions = [torch.IntTensor([1,2,3,4]), torch.IntTensor([2,3,4,5,6])]
>>> predictions = batch_filter_seq2seq_output(predictions, eos_id=4)
>>> predictions
[[1, 2, 3], [2, 3]]
"""
outputs = []
for p in prediction:
res = filter_seq2seq_output(p.tolist(), eos_id=eos_id)
outputs.append(res)
return outputs
def filter_seq2seq_output(string_pred, eos_id=-1):
"""Filter the output until the first eos occurs (exclusive).
Arguments
---------
string_pred : list
A list containing the output strings/ints predicted by the seq2seq system.
eos_id : int, string
The id of the eos.
Returns
-------
list
The output predicted by seq2seq model.
Example
-------
>>> string_pred = ['a','b','c','d','eos','e']
>>> string_out = filter_seq2seq_output(string_pred, eos_id='eos')
>>> string_out
['a', 'b', 'c', 'd']
"""
if isinstance(string_pred, list):
try:
eos_index = next(
i for i, v in enumerate(string_pred) if v == eos_id
)
except StopIteration:
eos_index = len(string_pred)
string_out = string_pred[:eos_index]
else:
raise ValueError("The input must be a list.")
return string_out
def inflate_tensor(tensor, times, dim):
"""This function inflates the tensor for times along dim.
Arguments
---------
tensor : torch.Tensor
The tensor to be inflated.
times : int
The tensor will inflate for this number of times.
dim : int
The dim to be inflated.
Returns
-------
torch.Tensor
The inflated tensor.
Example
-------
>>> tensor = torch.Tensor([[1,2,3], [4,5,6]])
>>> new_tensor = inflate_tensor(tensor, 2, dim=0)
>>> new_tensor
tensor([[1., 2., 3.],
[1., 2., 3.],
[4., 5., 6.],
[4., 5., 6.]])
"""
return torch.repeat_interleave(tensor, times, dim=dim)
def mask_by_condition(tensor, cond, fill_value):
"""This function will mask some element in the tensor with fill_value, if condition=False.
Arguments
---------
tensor : torch.Tensor
The tensor to be masked.
cond : torch.BoolTensor
This tensor has to be the same size as tensor.
Each element represents whether to keep the value in tensor.
fill_value : float
The value to fill in the masked element.
Returns
-------
torch.Tensor
The masked tensor.
Example
-------
>>> tensor = torch.Tensor([[1,2,3], [4,5,6]])
>>> cond = torch.BoolTensor([[True, True, False], [True, False, False]])
>>> mask_by_condition(tensor, cond, 0)
tensor([[1., 2., 0.],
[4., 0., 0.]])
"""
tensor = torch.where(
cond, tensor, torch.Tensor([fill_value]).to(tensor.device)
)
return tensor
def _update_mem(inp_tokens, memory):
"""This function is for updating the memory for transformer searches.
It is called at each decoding step. When called, it appends the
predicted token of the previous step to existing memory.
Arguments
---------
inp_tokens : tensor
Predicted token of the previous decoding step.
memory : tensor
Contains all the predicted tokens.
"""
if memory is None:
return inp_tokens.unsqueeze(1)
return torch.cat([memory, inp_tokens.unsqueeze(1)], dim=-1)
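def _example_update_mem():
    """Illustrative only, not part of the original module: the memory grows by
    one column of predicted tokens per decoding step (batch of 2 shown)."""
    step1 = _update_mem(torch.tensor([3, 5]), None)   # shape [2, 1]
    step2 = _update_mem(torch.tensor([7, 2]), step1)  # [[3, 7], [5, 2]]
    return step2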
| 56,679 | 33.560976 | 121 | py |
speechbrain | speechbrain-main/speechbrain/decoders/ctc.py | """Decoders and output normalization for CTC.
Authors
* Mirco Ravanelli 2020
* Aku Rouhe 2020
* Sung-Lin Yeh 2020
"""
import torch
from itertools import groupby
from speechbrain.dataio.dataio import length_to_mask
class CTCPrefixScorer:
"""This class implements the CTC prefix scorer of Algorithm 2 in
reference: https://www.merl.com/publications/docs/TR2017-190.pdf.
Official implementation: https://github.com/espnet/espnet/blob/master/espnet/nets/ctc_prefix_score.py
Arguments
---------
x : torch.Tensor
The encoder states.
enc_lens : torch.Tensor
The actual length of each enc_states sequence.
batch_size : int
The size of the batch.
beam_size : int
The width of beam.
blank_index : int
The index of the blank token.
eos_index : int
The index of the end-of-sequence (eos) token.
ctc_window_size: int
Compute the ctc scores over the time frames using windowing based on attention peaks.
If 0, no windowing applied.
"""
def __init__(
self,
x,
enc_lens,
batch_size,
beam_size,
blank_index,
eos_index,
ctc_window_size=0,
):
self.blank_index = blank_index
self.eos_index = eos_index
self.max_enc_len = x.size(1)
self.batch_size = batch_size
self.beam_size = beam_size
self.vocab_size = x.size(-1)
self.device = x.device
self.minus_inf = -1e20
self.last_frame_index = enc_lens - 1
self.ctc_window_size = ctc_window_size
# mask frames > enc_lens
mask = 1 - length_to_mask(enc_lens)
mask = mask.unsqueeze(-1).expand(-1, -1, x.size(-1)).eq(1)
x.masked_fill_(mask, self.minus_inf)
x[:, :, 0] = x[:, :, 0].masked_fill_(mask[:, :, 0], 0)
# dim=0: xnb, nonblank posteriors, dim=1: xb, blank posteriors
xnb = x.transpose(0, 1)
xb = (
xnb[:, :, self.blank_index]
.unsqueeze(2)
.expand(-1, -1, self.vocab_size)
)
# (2, L, batch_size * beam_size, vocab_size)
self.x = torch.stack([xnb, xb])
# The first index of each sentence.
self.beam_offset = (
torch.arange(batch_size, device=self.device) * self.beam_size
)
# The first index of each candidates.
self.cand_offset = (
torch.arange(batch_size, device=self.device) * self.vocab_size
)
def forward_step(self, g, state, candidates=None, attn=None):
"""This method if one step of forwarding operation
for the prefix ctc scorer.
Arguments
---------
g : torch.Tensor
The tensor of prefix label sequences, h = g + c.
state : tuple
Previous ctc states.
candidates : torch.Tensor
(batch_size * beam_size, ctc_beam_size), The topk candidates for rescoring.
The ctc_beam_size is set as 2 * beam_size. If given, performing partial ctc scoring.
"""
prefix_length = g.size(1)
last_char = [gi[-1] for gi in g] if prefix_length > 0 else [0] * len(g)
self.num_candidates = (
self.vocab_size if candidates is None else candidates.size(-1)
)
if state is None:
# r_prev: (L, 2, batch_size * beam_size)
r_prev = torch.full(
(self.max_enc_len, 2, self.batch_size, self.beam_size),
self.minus_inf,
device=self.device,
)
# Accumulate blank posteriors at each step
r_prev[:, 1] = torch.cumsum(
self.x[0, :, :, self.blank_index], 0
).unsqueeze(2)
r_prev = r_prev.view(-1, 2, self.batch_size * self.beam_size)
psi_prev = 0.0
else:
r_prev, psi_prev = state
# for partial search
if candidates is not None:
scoring_table = torch.full(
(self.batch_size * self.beam_size, self.vocab_size),
-1,
dtype=torch.long,
device=self.device,
)
# Assign indices of candidates to their positions in the table
col_index = torch.arange(
self.batch_size * self.beam_size, device=self.device
).unsqueeze(1)
scoring_table[col_index, candidates] = torch.arange(
self.num_candidates, device=self.device
)
# Select candidates indices for scoring
scoring_index = (
candidates
+ self.cand_offset.unsqueeze(1)
.repeat(1, self.beam_size)
.view(-1, 1)
).view(-1)
x_inflate = torch.index_select(
self.x.view(2, -1, self.batch_size * self.vocab_size),
2,
scoring_index,
).view(2, -1, self.batch_size * self.beam_size, self.num_candidates)
# for full search
else:
scoring_table = None
x_inflate = (
self.x.unsqueeze(3)
.repeat(1, 1, 1, self.beam_size, 1)
.view(
2, -1, self.batch_size * self.beam_size, self.num_candidates
)
)
# Prepare forward probs
r = torch.full(
(
self.max_enc_len,
2,
self.batch_size * self.beam_size,
self.num_candidates,
),
self.minus_inf,
device=self.device,
)
r.fill_(self.minus_inf)
# (Alg.2-6)
if prefix_length == 0:
r[0, 0] = x_inflate[0, 0]
# (Alg.2-10): phi = prev_nonblank + prev_blank = r_t-1^nb(g) + r_t-1^b(g)
r_sum = torch.logsumexp(r_prev, 1)
phi = r_sum.unsqueeze(2).repeat(1, 1, self.num_candidates)
# (Alg.2-10): if last token of prefix g in candidates, phi = prev_b + 0
if candidates is not None:
for i in range(self.batch_size * self.beam_size):
pos = scoring_table[i, last_char[i]]
if pos != -1:
phi[:, i, pos] = r_prev[:, 1, i]
else:
for i in range(self.batch_size * self.beam_size):
phi[:, i, last_char[i]] = r_prev[:, 1, i]
# Start, end frames for scoring (|g| < |h|).
# Scoring based on attn peak if ctc_window_size > 0
if self.ctc_window_size == 0 or attn is None:
start = max(1, prefix_length)
end = self.max_enc_len
else:
_, attn_peak = torch.max(attn, dim=1)
max_frame = torch.max(attn_peak).item() + self.ctc_window_size
min_frame = torch.min(attn_peak).item() - self.ctc_window_size
start = max(max(1, prefix_length), int(min_frame))
end = min(self.max_enc_len, int(max_frame))
# Compute forward prob log(r_t^nb(h)) and log(r_t^b(h)):
for t in range(start, end):
# (Alg.2-11): dim=0, p(h|cur step is nonblank) = [p(prev step=y) + phi] * p(c)
rnb_prev = r[t - 1, 0]
# (Alg.2-12): dim=1, p(h|cur step is blank) = [p(prev step is blank) + p(prev step is nonblank)] * p(blank)
rb_prev = r[t - 1, 1]
r_ = torch.stack([rnb_prev, phi[t - 1], rnb_prev, rb_prev]).view(
2, 2, self.batch_size * self.beam_size, self.num_candidates
)
r[t] = torch.logsumexp(r_, 1) + x_inflate[:, t]
# Compute the prefix prob, psi
psi_init = r[start - 1, 0].unsqueeze(0)
# phi is prob at t-1 step, shift one frame and add it to the current prob p(c)
phix = torch.cat((phi[0].unsqueeze(0), phi[:-1]), dim=0) + x_inflate[0]
# (Alg.2-13): psi = psi + phi * p(c)
if candidates is not None:
psi = torch.full(
(self.batch_size * self.beam_size, self.vocab_size),
self.minus_inf,
device=self.device,
)
psi_ = torch.logsumexp(
torch.cat((phix[start:end], psi_init), dim=0), dim=0
)
# only assign prob to candidates
for i in range(self.batch_size * self.beam_size):
psi[i, candidates[i]] = psi_[i]
else:
psi = torch.logsumexp(
torch.cat((phix[start:end], psi_init), dim=0), dim=0
)
# (Alg.2-3): if c = <eos>, psi = log(r_T^n(g) + r_T^b(g)), where T is the length of max frames
for i in range(self.batch_size * self.beam_size):
psi[i, self.eos_index] = r_sum[
self.last_frame_index[i // self.beam_size], i
]
# Exclude blank probs for joint scoring
psi[:, self.blank_index] = self.minus_inf
return psi - psi_prev, (r, psi, scoring_table)
def permute_mem(self, memory, index):
"""This method permutes the CTC model memory
to synchronize the memory index with the current output.
Arguments
---------
memory : No limit
The memory variable to be permuted.
index : torch.Tensor
The index of the previous path.
Return
------
The variable of the memory being permuted.
"""
r, psi, scoring_table = memory
# The index of top-K vocab came from in (t-1) timesteps.
best_index = (
index
+ (self.beam_offset.unsqueeze(1).expand_as(index) * self.vocab_size)
).view(-1)
# synchronize forward prob
psi = torch.index_select(psi.view(-1), dim=0, index=best_index)
psi = (
psi.view(-1, 1)
.repeat(1, self.vocab_size)
.view(self.batch_size * self.beam_size, self.vocab_size)
)
# synchronize ctc states
if scoring_table is not None:
effective_index = (
index // self.vocab_size + self.beam_offset.view(-1, 1)
).view(-1)
selected_vocab = (index % self.vocab_size).view(-1)
score_index = scoring_table[effective_index, selected_vocab]
score_index[score_index == -1] = 0
best_index = score_index + effective_index * self.num_candidates
r = torch.index_select(
r.view(
-1, 2, self.batch_size * self.beam_size * self.num_candidates
),
dim=-1,
index=best_index,
)
r = r.view(-1, 2, self.batch_size * self.beam_size)
return r, psi
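def _example_ctc_prefix_scorer():
    """Illustrative smoke test, not part of the original module. The scorer is
    normally created inside S2SBeamSearcher; here random CTC log-probabilities
    of shape [batch, time, vocab] stand in for real encoder outputs."""
    batch_size, beam_size, time_steps, vocab = 2, 3, 8, 5
    x = torch.randn(batch_size, time_steps, vocab).log_softmax(dim=-1)
    enc_lens = torch.tensor([time_steps, time_steps])
    scorer = CTCPrefixScorer(
        x, enc_lens, batch_size, beam_size, blank_index=0, eos_index=4
    )
    # At the first decoding step the prefixes are empty: one row per beam.
    g = torch.empty(batch_size * beam_size, 0, dtype=torch.long)
    psi, state = scorer.forward_step(g, None)
    return psi.shape  # torch.Size([6, 5]): one score per candidate token and beam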
def filter_ctc_output(string_pred, blank_id=-1):
"""Apply CTC output merge and filter rules.
Removes the blank symbol and output repetitions.
Arguments
---------
string_pred : list
A list containing the output strings/ints predicted by the CTC system.
blank_id : int, string
The id of the blank.
Returns
-------
list
The output predicted by CTC without the blank symbol and
the repetitions.
Example
-------
>>> string_pred = ['a','a','blank','b','b','blank','c']
>>> string_out = filter_ctc_output(string_pred, blank_id='blank')
>>> print(string_out)
['a', 'b', 'c']
"""
if isinstance(string_pred, list):
# Filter the repetitions
string_out = [i[0] for i in groupby(string_pred)]
# Filter the blank symbol
string_out = list(filter(lambda elem: elem != blank_id, string_out))
else:
raise ValueError("filter_ctc_out can only filter python lists")
return string_out
def ctc_greedy_decode(probabilities, seq_lens, blank_id=-1):
"""Greedy decode a batch of probabilities and apply CTC rules.
Arguments
---------
probabilities : torch.tensor
Output probabilities (or log-probabilities) from the network with shape
[batch, time, probabilities]
seq_lens : torch.tensor
Relative true sequence lengths (to deal with padded inputs),
the longest sequence has length 1.0, others a value between zero and one.
Shape [batch].
blank_id : int, string
The blank symbol/index. Default: -1. If a negative number is given,
it is assumed to mean counting down from the maximum possible index,
so that -1 refers to the maximum possible index.
Returns
-------
list
Outputs as Python list of lists, with "ragged" dimensions; padding
has been removed.
Example
-------
>>> import torch
>>> probs = torch.tensor([[[0.3, 0.7], [0.0, 0.0]],
... [[0.2, 0.8], [0.9, 0.1]]])
>>> lens = torch.tensor([0.51, 1.0])
>>> blank_id = 0
>>> ctc_greedy_decode(probs, lens, blank_id)
[[1], [1]]
"""
if isinstance(blank_id, int) and blank_id < 0:
blank_id = probabilities.shape[-1] + blank_id
batch_max_len = probabilities.shape[1]
batch_outputs = []
for seq, seq_len in zip(probabilities, seq_lens):
actual_size = int(torch.round(seq_len * batch_max_len))
scores, predictions = torch.max(seq.narrow(0, 0, actual_size), dim=1)
out = filter_ctc_output(predictions.tolist(), blank_id=blank_id)
batch_outputs.append(out)
return batch_outputs
| 13,350 | 34.413793 | 119 | py |
speechbrain | speechbrain-main/speechbrain/decoders/transducer.py | """Decoders and output normalization for Transducer sequence.
Author:
Abdelwahab HEBA 2020
Sung-Lin Yeh 2020
"""
import torch
from functools import partial
class TransducerBeamSearcher(torch.nn.Module):
"""
This class implements the beam-search algorithm for the transducer model.
Parameters
----------
decode_network_lst : list
List of prediction network (PN) layers.
tjoint: transducer_joint module
This module perform the joint between TN and PN.
classifier_network : list
List of output layers (after performing joint between TN and PN)
e.g. (TN,PN) => joint => classifier_network_list [DNN block, Linear, ...] => chars prob
blank_id : int
The blank symbol/index.
beam : int
The width of beam. Greedy Search is used when beam = 1.
nbest : int
Number of hypotheses to keep.
lm_module : torch.nn.ModuleList
Neural networks modules for LM.
lm_weight : float
The weight of LM when performing beam search (λ).
log P(y|x) + λ log P_LM(y). (default: 0.3)
state_beam : float
The threshold coefficient in log space to decide if hyps in A (process_hyps)
is likely to compete with hyps in B (beam_hyps), if not, end the while loop.
Reference: https://arxiv.org/pdf/1911.01629.pdf
expand_beam : float
The threshold coefficient to limit the number of expanded hypotheses
that are added in A (process_hyp).
Reference: https://arxiv.org/pdf/1911.01629.pdf
Reference: https://github.com/kaldi-asr/kaldi/blob/master/src/decoder/simple-decoder.cc (See PruneToks)
Example
-------
searcher = TransducerBeamSearcher(
decode_network_lst=[hparams["emb"], hparams["dec"]],
tjoint=hparams["Tjoint"],
classifier_network=[hparams["transducer_lin"]],
blank_id=0,
beam_size=hparams["beam_size"],
nbest=hparams["nbest"],
lm_module=hparams["lm_model"],
lm_weight=hparams["lm_weight"],
state_beam=2.3,
expand_beam=2.3,
)
>>> from speechbrain.nnet.transducer.transducer_joint import Transducer_joint
>>> import speechbrain as sb
>>> emb = sb.nnet.embedding.Embedding(
... num_embeddings=35,
... embedding_dim=3,
... consider_as_one_hot=True,
... blank_id=0
... )
>>> dec = sb.nnet.RNN.GRU(
... hidden_size=10, input_shape=(1, 40, 34), bidirectional=False
... )
>>> lin = sb.nnet.linear.Linear(input_shape=(1, 40, 10), n_neurons=35)
>>> joint_network= sb.nnet.linear.Linear(input_shape=(1, 1, 40, 35), n_neurons=35)
>>> tjoint = Transducer_joint(joint_network, joint="sum")
>>> searcher = TransducerBeamSearcher(
... decode_network_lst=[emb, dec],
... tjoint=tjoint,
... classifier_network=[lin],
... blank_id=0,
... beam_size=1,
... nbest=1,
... lm_module=None,
... lm_weight=0.0,
... )
>>> enc = torch.rand([1, 20, 10])
>>> hyps, scores, _, _ = searcher(enc)
"""
def __init__(
self,
decode_network_lst,
tjoint,
classifier_network,
blank_id,
beam_size=4,
nbest=5,
lm_module=None,
lm_weight=0.0,
state_beam=2.3,
expand_beam=2.3,
):
super(TransducerBeamSearcher, self).__init__()
self.decode_network_lst = decode_network_lst
self.tjoint = tjoint
self.classifier_network = classifier_network
self.blank_id = blank_id
self.beam_size = beam_size
self.nbest = nbest
self.lm = lm_module
self.lm_weight = lm_weight
if lm_module is None and lm_weight > 0:
raise ValueError("Language model is not provided.")
self.state_beam = state_beam
self.expand_beam = expand_beam
self.softmax = torch.nn.LogSoftmax(dim=-1)
if self.beam_size <= 1:
self.searcher = self.transducer_greedy_decode
else:
self.searcher = self.transducer_beam_search_decode
def forward(self, tn_output):
"""
Arguments
----------
tn_output : torch.tensor
Output from transcription network with shape
[batch, time_len, hiddens].
Returns
-------
Topk hypotheses
"""
hyps = self.searcher(tn_output)
return hyps
def transducer_greedy_decode(self, tn_output):
"""Transducer greedy decoder is a greedy decoder over batch which apply Transducer rules:
1- for each time step in the Transcription Network (TN) output:
-> Update the ith utterance only if
the previous target != the new one (we save the hiddens and the target)
-> otherwise:
---> keep the previous target prediction from the decoder
Arguments
----------
tn_output : torch.tensor
Output from transcription network with shape
[batch, time_len, hiddens].
Returns
-------
tuple
(hyps, avg_score, None, None), where hyps is a list of the predicted
token sequences (one per batch element) and avg_score is the
batch-averaged exponentiated log-probability of the greedy hypotheses.
"""
hyp = {
"prediction": [[] for _ in range(tn_output.size(0))],
"logp_scores": [0.0 for _ in range(tn_output.size(0))],
}
# prepare BOS = Blank for the Prediction Network (PN)
hidden = None
input_PN = (
torch.ones(
(tn_output.size(0), 1),
device=tn_output.device,
dtype=torch.int32,
)
* self.blank_id
)
# First forward-pass on PN
out_PN, hidden = self._forward_PN(input_PN, self.decode_network_lst)
# For each time step
for t_step in range(tn_output.size(1)):
# do unsqueeze since tjoint must have a 4-dim input [B,T,U,Hidden]
log_probs = self._joint_forward_step(
tn_output[:, t_step, :].unsqueeze(1).unsqueeze(1),
out_PN.unsqueeze(1),
)
# Sort outputs at time
logp_targets, positions = torch.max(
self.softmax(log_probs).squeeze(1).squeeze(1), dim=1
)
# Batch hidden update
have_update_hyp = []
for i in range(positions.size(0)):
# Update hiddens only if
# 1- current prediction is non blank
if positions[i].item() != self.blank_id:
hyp["prediction"][i].append(positions[i].item())
hyp["logp_scores"][i] += logp_targets[i]
input_PN[i][0] = positions[i]
have_update_hyp.append(i)
if len(have_update_hyp) > 0:
# Select sentence to update
# And do a forward steps + generated hidden
(
selected_input_PN,
selected_hidden,
) = self._get_sentence_to_update(
have_update_hyp, input_PN, hidden
)
selected_out_PN, selected_hidden = self._forward_PN(
selected_input_PN, self.decode_network_lst, selected_hidden
)
# update hiddens and out_PN
out_PN[have_update_hyp] = selected_out_PN
hidden = self._update_hiddens(
have_update_hyp, selected_hidden, hidden
)
return (
hyp["prediction"],
torch.Tensor(hyp["logp_scores"]).exp().mean(),
None,
None,
)
def transducer_beam_search_decode(self, tn_output):
"""Transducer beam search decoder is a beam search decoder over batch which apply Transducer rules:
1- for each utterance:
2- for each time steps in the Transcription Network (TN) output:
-> Do forward on PN and Joint network
-> Select topK <= beam
-> Do a while loop extending the hyps until we reach blank
-> otherwise:
--> extend hyp by the new token
Arguments
----------
tn_output : torch.tensor
Output from transcription network with shape
[batch, time_len, hiddens].
Returns
-------
tuple
(best hypotheses, batch-averaged exponentiated score of the best
hypotheses, n-best hypotheses per utterance, n-best scores per utterance).
"""
# min between beam and max_target_len
nbest_batch = []
nbest_batch_score = []
for i_batch in range(tn_output.size(0)):
# if we use an RNN LM, keep its hiddens
# prepare BOS = Blank for the Prediction Network (PN)
# Prepare Blank prediction
blank = (
torch.ones((1, 1), device=tn_output.device, dtype=torch.int32)
* self.blank_id
)
input_PN = (
torch.ones((1, 1), device=tn_output.device, dtype=torch.int32)
* self.blank_id
)
# First forward-pass on PN
hyp = {
"prediction": [self.blank_id],
"logp_score": 0.0,
"hidden_dec": None,
}
if self.lm_weight > 0:
lm_dict = {"hidden_lm": None}
hyp.update(lm_dict)
beam_hyps = [hyp]
# For each time step
for t_step in range(tn_output.size(1)):
# get hyps for extension
process_hyps = beam_hyps
beam_hyps = []
while True:
if len(beam_hyps) >= self.beam_size:
break
# Add norm score
a_best_hyp = max(
process_hyps, key=partial(get_transducer_key),
)
# Break if best_hyp in A is worse by more than state_beam than best_hyp in B
if len(beam_hyps) > 0:
b_best_hyp = max(
beam_hyps, key=partial(get_transducer_key),
)
a_best_prob = a_best_hyp["logp_score"]
b_best_prob = b_best_hyp["logp_score"]
if b_best_prob >= self.state_beam + a_best_prob:
break
# remove best hyp from process_hyps
process_hyps.remove(a_best_hyp)
# forward PN
input_PN[0, 0] = a_best_hyp["prediction"][-1]
out_PN, hidden = self._forward_PN(
input_PN,
self.decode_network_lst,
a_best_hyp["hidden_dec"],
)
# do unsqueeze since tjoint must have a 4-dim input [B,T,U,Hidden]
log_probs = self._joint_forward_step(
tn_output[i_batch, t_step, :]
.unsqueeze(0)
.unsqueeze(0)
.unsqueeze(0),
out_PN.unsqueeze(0),
)
if self.lm_weight > 0:
log_probs_lm, hidden_lm = self._lm_forward_step(
input_PN, a_best_hyp["hidden_lm"]
)
# Sort outputs at time
logp_targets, positions = torch.topk(
log_probs.view(-1), k=self.beam_size, dim=-1
)
best_logp = (
logp_targets[0]
if positions[0] != blank
else logp_targets[1]
)
# Extend hyp by selection
for j in range(logp_targets.size(0)):
# hyp
topk_hyp = {
"prediction": a_best_hyp["prediction"][:],
"logp_score": a_best_hyp["logp_score"]
+ logp_targets[j],
"hidden_dec": a_best_hyp["hidden_dec"],
}
if positions[j] == self.blank_id:
beam_hyps.append(topk_hyp)
if self.lm_weight > 0:
topk_hyp["hidden_lm"] = a_best_hyp["hidden_lm"]
continue
if logp_targets[j] >= best_logp - self.expand_beam:
topk_hyp["prediction"].append(positions[j].item())
topk_hyp["hidden_dec"] = hidden
if self.lm_weight > 0:
topk_hyp["hidden_lm"] = hidden_lm
topk_hyp["logp_score"] += (
self.lm_weight
* log_probs_lm[0, 0, positions[j]]
)
process_hyps.append(topk_hyp)
# Add norm score
nbest_hyps = sorted(
beam_hyps, key=partial(get_transducer_key), reverse=True,
)[: self.nbest]
all_predictions = []
all_scores = []
for hyp in nbest_hyps:
all_predictions.append(hyp["prediction"][1:])
all_scores.append(hyp["logp_score"] / len(hyp["prediction"]))
nbest_batch.append(all_predictions)
nbest_batch_score.append(all_scores)
return (
[nbest_utt[0] for nbest_utt in nbest_batch],
torch.Tensor(
[nbest_utt_score[0] for nbest_utt_score in nbest_batch_score]
)
.exp()
.mean(),
nbest_batch,
nbest_batch_score,
)
def _joint_forward_step(self, h_i, out_PN):
"""Join predictions (TN & PN)."""
with torch.no_grad():
# the output would be a tensor of [B,T,U, oneof[sum,concat](Hidden_TN,Hidden_PN)]
out = self.tjoint(h_i, out_PN,)
# forward the output layers + activation + save logits
out = self._forward_after_joint(out, self.classifier_network)
log_probs = self.softmax(out)
return log_probs
def _lm_forward_step(self, inp_tokens, memory):
"""This method should implement one step of
forwarding operation for language model.
Arguments
---------
inp_tokens : torch.Tensor
The input tensor of the current timestep.
memory : No limit
The memory variables input for this timestep.
(e.g., RNN hidden states).
Return
------
log_probs : torch.Tensor
Log-probabilities of the current timestep output.
hs : No limit
The memory variables are generated in this timestep.
(e.g., RNN hidden states).
"""
with torch.no_grad():
logits, hs = self.lm(inp_tokens, hx=memory)
log_probs = self.softmax(logits)
return log_probs, hs
def _get_sentence_to_update(self, selected_sentences, output_PN, hidden):
"""Select and return the updated hiddens and output
from the Prediction Network.
Arguments
----------
selected_sentences : list
List of updated sentences (indexes).
output_PN: torch.tensor
Output tensor from prediction network (PN).
hidden : torch.tensor
Optional: None, hidden tensor to be used for
recurrent layers in the prediction network.
Returns
-------
selected_output_PN: torch.tensor
Outputs a logits tensor [B_selected,U, hiddens].
hidden_update_hyp: torch.tensor
Selected hiddens tensor.
"""
selected_output_PN = output_PN[selected_sentences, :]
# for LSTM hiddens (hn, hc)
if isinstance(hidden, tuple):
hidden0_update_hyp = hidden[0][:, selected_sentences, :]
hidden1_update_hyp = hidden[1][:, selected_sentences, :]
hidden_update_hyp = (hidden0_update_hyp, hidden1_update_hyp)
else:
hidden_update_hyp = hidden[:, selected_sentences, :]
return selected_output_PN, hidden_update_hyp
def _update_hiddens(self, selected_sentences, updated_hidden, hidden):
"""Update hidden tensor by a subset of hidden tensor (updated ones).
Arguments
----------
selected_sentences : list
List of index to be updated.
updated_hidden : torch.tensor
Hidden tensor of the selected sentences for update.
hidden : torch.tensor
Hidden tensor to be updated.
Returns
-------
torch.tensor
Updated hidden tensor.
"""
if isinstance(hidden, tuple):
hidden[0][:, selected_sentences, :] = updated_hidden[0]
hidden[1][:, selected_sentences, :] = updated_hidden[1]
else:
hidden[:, selected_sentences, :] = updated_hidden
return hidden
def _forward_PN(self, out_PN, decode_network_lst, hidden=None):
"""Compute forward-pass through a list of prediction network (PN) layers.
Arguments
----------
out_PN : torch.tensor
Input sequence from prediction network with shape
[batch, target_seq_lens].
decode_network_lst: list
List of prediction network (PN) layers.
hidden : torch.tensor
Optional: None, hidden tensor to be used for
recurrent layers in the prediction network
Returns
-------
out_PN : torch.tensor
Outputs a logits tensor [B,U, hiddens].
hidden : torch.tensor
Hidden tensor to be used for the next step
by recurrent layers in prediction network.
"""
for layer in decode_network_lst:
if layer.__class__.__name__ in [
"RNN",
"LSTM",
"GRU",
"LiGRU",
"LiGRU_Layer",
]:
out_PN, hidden = layer(out_PN, hidden)
else:
out_PN = layer(out_PN)
return out_PN, hidden
def _forward_after_joint(self, out, classifier_network):
"""Compute forward-pass through a list of classifier neural network.
Arguments
----------
out : torch.tensor
Output from joint network with shape
[batch, target_len, time_len, hiddens]
classifier_network : list
List of output layers (after performing joint between TN and PN)
e.g. (TN,PN) => joint => classifier_network_list [DNN block, Linear, ...] => chars prob
Returns
-------
torch.tensor
Outputs a logits tensor [B, U,T, Output_Dim];
"""
for layer in classifier_network:
out = layer(out)
return out
def get_transducer_key(x):
"""Argument function to customize the sort order (in sorted & max).
To be used as `key=partial(get_transducer_key)`.
Arguments
----------
x : dict
one of the items under comparison
Returns
-------
float
Normalized log-score.
"""
logp_key = x["logp_score"] / len(x["prediction"])
return logp_key
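def _example_get_transducer_key():
    """Illustrative only, not part of the original module: hypotheses are ranked
    by length-normalized log-score, so the longer hypothesis below comes first
    (-1.2 / 3 = -0.4 beats -1.0 / 2 = -0.5) despite its lower raw score."""
    hyps = [
        {"prediction": [0, 7, 3], "logp_score": -1.2},
        {"prediction": [0, 5], "logp_score": -1.0},
    ]
    return sorted(hyps, key=get_transducer_key, reverse=True)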
| 19,715 | 35.309392 | 111 | py |
speechbrain | speechbrain-main/speechbrain/wordemb/transformer.py | """
A convenience wrapper for word embeddings retrieved out of
HuggingFace transformers (e.g. BERT)
Authors
* Artem Ploujnikov 2021
"""
import torch
import numpy as np
from torch import nn
def _last_n_layers(count):
return range(-count, 0)
class TransformerWordEmbeddings(nn.Module):
"""A wrapper to retrieve word embeddings out of a pretrained Transformer model
from HuggingFace Transformers (e.g. BERT)
Arguments
---------
model: str|nn.Module
the underlying model instance or the name of the model
to download
tokenizer: str|transformers.tokenization_utils_base.PreTrainedTokenizerBase
a pretrained tokenizer - or the identifier to retrieve
one from HuggingFace
layers: int|list
a list of layer indexes from which to construct an embedding or the number of layers
device:
a torch device identifier. If provided, the model
will be transferred onto that device
Example
-------
NOTE: Doctests are disabled because the dependency on the
HuggingFace transformer library is optional.
>>> from transformers import AutoTokenizer, AutoModel # doctest: +SKIP
>>> from speechbrain.wordemb.transformer import TransformerWordEmbeddings
>>> model_name = "bert-base-uncased" # doctest: +SKIP
>>> tokenizer = AutoTokenizer.from_pretrained(
... model_name, return_tensors='pt') # doctest: +SKIP
>>> model = AutoModel.from_pretrained(
... model_name,
... output_hidden_states=True) # doctest: +SKIP
>>> word_emb = TransformerWordEmbeddings(
... model=model,
... layers=4,
... tokenizer=tokenizer
... ) # doctest: +SKIP
>>> embedding = word_emb.embedding(
... sentence="THIS IS A TEST SENTENCE",
... word="TEST"
... ) # doctest: +SKIP
>>> embedding[:8] # doctest: +SKIP
tensor([ 3.4332, -3.6702, 0.5152, -1.9301, 0.9197, 2.1628, -0.2841, -0.3549])
>>> embeddings = word_emb.embeddings("This is cool") # doctest: +SKIP
>>> embeddings.shape # doctest: +SKIP
torch.Size([3, 768])
>>> embeddings[:, :3] # doctest: +SKIP
tensor([[-2.9078, 1.2496, 0.7269],
[-0.9940, -0.6960, 1.4350],
[-1.2401, -3.8237, 0.2739]])
>>> sentences = [
... "This is the first test sentence",
... "This is the second test sentence",
... "A quick brown fox jumped over the lazy dog"
... ]
>>> batch_embeddings = word_emb.batch_embeddings(sentences) # doctest: +SKIP
>>> batch_embeddings.shape # doctest: +SKIP
torch.Size([3, 9, 768])
>>> batch_embeddings[:, :2, :3] # doctest: +SKIP
tensor([[[-5.0935, -1.2838, 0.7868],
[-4.6889, -2.1488, 2.1380]],
[[-4.4993, -2.0178, 0.9369],
[-4.1760, -2.4141, 1.9474]],
[[-1.0065, 1.4227, -2.6671],
[-0.3408, -0.6238, 0.1780]]])
"""
MSG_WORD = "'word' should be either a word or the index of a word"
DEFAULT_LAYERS = 4
def __init__(self, model, tokenizer=None, layers=None, device=None):
super().__init__()
if not layers:
layers = self.DEFAULT_LAYERS
layers = _last_n_layers(layers) if isinstance(layers, int) else layers
self.layers = list(layers)
if isinstance(model, str):
if tokenizer is None:
tokenizer = model
model = _get_model(model)
if isinstance(tokenizer, str):
tokenizer = _get_tokenizer(tokenizer)
elif tokenizer is None:
raise ValueError("A tokenizer must be provided when a model instance is given.")
self.model = model
self.tokenizer = tokenizer
if device is not None:
self.device = device
self.model = self.model.to(device)
else:
self.device = self.model.device
def forward(self, sentence, word=None):
"""Retrieves a word embedding for the specified word within
a given sentence, if a word is provided, or all word embeddings
if only a sentence is given
Arguments
---------
sentence: str
a sentence
word: str|int
a word or a word's index within the sentence. If a word
is given, and it is encountered multiple times in a
sentence, the first occurrence is used
Returns
-------
emb: torch.Tensor
the word embedding
"""
return (
self.embedding(sentence, word)
if word is not None
else self.embeddings(sentence)
)
def embedding(self, sentence, word):
"""Retrieves a word embedding for the specified word within
a given sentence
Arguments
---------
sentence: str
a sentence
word: str|int
a word or a word's index within the sentence. If a word
is given, and it is encountered multiple times in a
sentence, the first occurrence is used
Returns
-------
emb: torch.Tensor
the word embedding
"""
encoded = self.tokenizer.encode_plus(sentence, return_tensors="pt")
with torch.no_grad():
output = self.model(**self._to_device(encoded))
if isinstance(word, str):
idx = self._get_word_idx(sentence, word)
elif isinstance(word, int):
idx = word
else:
raise ValueError(self.MSG_WORD)
states = torch.stack(output.hidden_states)
word_embedding = self._get_word_vector(encoded, states, idx).mean(dim=0)
return word_embedding
def embeddings(self, sentence):
"""
Returns the model embeddings for all words
in a sentence
Arguments
---------
sentence: str
a sentence
Returns
-------
emb: torch.Tensor
a tensor of all word embeddings
"""
encoded = self.tokenizer.encode_plus(sentence, return_tensors="pt")
with torch.no_grad():
output = self.model(**self._to_device(encoded))
token_ids_word = torch.tensor(
[
idx
for idx, word_id in enumerate(encoded.word_ids())
if word_id is not None
],
device=self.device,
)
states = torch.stack(output.hidden_states)
return self._get_hidden_states(states, token_ids_word)
def batch_embeddings(self, sentences):
"""Returns embeddings for a collection of sentences
Arguments
---------
sentences: List[str]
a list of strings corresponding to a batch of
sentences
Returns
-------
emb: torch.Tensor
a (B x W x E) tensor
B - the batch dimensions (samples)
W - the word dimension
E - the embedding dimension
"""
encoded = self.tokenizer.batch_encode_plus(
sentences, padding=True, return_tensors="pt"
)
with torch.no_grad():
output = self.model(**self._to_device(encoded))
states = torch.stack(output.hidden_states)
return self._get_hidden_states(states)
def _to_device(self, encoded):
return {
key: self._tensor_to_device(value) for key, value in encoded.items()
}
def _tensor_to_device(self, value):
return (
value.to(self.device) if isinstance(value, torch.Tensor) else value
)
def _get_word_idx(self, sent, word):
return sent.split(" ").index(word)
def _get_hidden_states(self, states, token_ids_word=None):
output = states[self.layers].sum(0).squeeze()
if token_ids_word is not None:
output = output[token_ids_word]
else:
output = output[:, 1:-1, :]
return output
def _get_word_vector(self, encoded, states, idx):
token_ids_word = torch.from_numpy(
np.where(np.array(encoded.word_ids()) == idx)[0]
).to(self.device)
return self._get_hidden_states(states, token_ids_word)
def to(self, device):
"""Transfers the model to the specified PyTorch device"""
self.device = device
self.model = self.model.to(device)
return self
class MissingTransformersError(Exception):
"""Thrown when HuggingFace Transformers is not installed"""
MESSAGE = "This module requires HuggingFace Transformers"
def __init__(self):
super().__init__(self.MESSAGE)
def _get_model(identifier):
"""Tries to retrieve a pretrained model from Huggingface"""
try:
from transformers import AutoModel # noqa
return AutoModel.from_pretrained(identifier, output_hidden_states=True)
except ImportError:
raise MissingTransformersError()
def _get_tokenizer(identifier):
"""Tries to retreive a pretrained tokenizer from HuggingFace"""
try:
from transformers import AutoTokenizer # noqa
return AutoTokenizer.from_pretrained(identifier)
except ImportError:
raise MissingTransformersError()
| 9,176 | 30.003378 | 92 | py |
speechbrain | speechbrain-main/speechbrain/wordemb/util.py | """
Utilities for word embeddings
Authors
* Artem Ploujnikov 2021
"""
import torch
def expand_to_chars(emb, seq, seq_len, word_separator):
"""Expands word embeddings to a sequence of character
embeddings, assigning each character the word embedding
of the word to which it belongs
Arguments
---------
emb: torch.Tensor
a tensor of word embeddings
seq: torch.Tensor
a tensor of character embeddings
seq_len: torch.Tensor
a tensor of character embedding lengths
word_separator: torch.Tensor
the word separator being used
Returns
-------
char_word_emb: torch.Tensor
a combined character + word embedding tensor
Example
-------
>>> import torch
>>> emb = torch.tensor(
... [[[1., 2., 3.],
... [3., 1., 2.],
... [0., 0., 0.]],
... [[1., 3., 2.],
... [3., 2., 1.],
... [2., 3., 1.]]]
... )
>>> seq = torch.tensor(
... [[1, 2, 0, 2, 1, 0],
... [1, 0, 1, 2, 0, 2]]
... )
>>> seq_len = torch.tensor([4, 5])
>>> word_separator = 0
>>> expand_to_chars(emb, seq, seq_len, word_separator)
tensor([[[1., 2., 3.],
[1., 2., 3.],
[0., 0., 0.],
[3., 1., 2.],
[3., 1., 2.],
[0., 0., 0.]],
<BLANKLINE>
[[1., 3., 2.],
[0., 0., 0.],
[3., 2., 1.],
[3., 2., 1.],
[0., 0., 0.],
[2., 3., 1.]]])
"""
word_boundaries = seq == word_separator
words = word_boundaries.cumsum(dim=-1)
# TODO: Find a way to vectorize over the batch axis
char_word_emb = torch.zeros(emb.size(0), seq.size(-1), emb.size(-1)).to(
emb.device
)
seq_len_idx = (seq_len * seq.size(-1)).int()
for idx, (item, item_length) in enumerate(zip(words, seq_len_idx)):
char_word_emb[idx] = emb[idx, item]
char_word_emb[idx, item_length:, :] = 0
char_word_emb[idx, word_boundaries[idx], :] = 0
return char_word_emb
| 2,076 | 25.974026 | 76 | py |
speechbrain | speechbrain-main/speechbrain/nnet/embedding.py | """Library implementing embedding.
Authors
* Abdelwahab Heba 2020
"""
import torch
import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
class Embedding(nn.Module):
"""Computes an embedding x = wx.
Arguments
---------
num_embeddings : int
Size of the dictionary of embeddings.
embedding_dim : int
It is the dim of embedding (i.e, the dimensionality of the output).
consider_as_one_hot : bool
Create non-trainable one-hot vector.
blank_id : int
If consider_as_one_hot == True: consider the embedding as one_hot
and use blank_id as the all-zero one_hot vector.
Example
-------
>>> from speechbrain.nnet.embedding import Embedding
>>> import torch
>>> emb = Embedding(
... num_embeddings=40,
... embedding_dim=39,
... consider_as_one_hot=True,
... blank_id=39
... )
>>> inputs = torch.Tensor([10,5,2,0,39]).long()
>>> output = emb(inputs)
>>> output.shape
torch.Size([5, 39])
>>> output
tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.]])
>>> emb = Embedding(num_embeddings=5, embedding_dim=3, consider_as_one_hot=False)
>>> e = emb(torch.LongTensor([[0, 1, 2], [3, 4, 2]]))
>>> e.shape
torch.Size([2, 3, 3])
"""
def __init__(
self,
num_embeddings,
embedding_dim=128,
consider_as_one_hot=False,
blank_id=0,
):
super().__init__()
self.num_embeddings = num_embeddings
self.consider_as_one_hot = consider_as_one_hot
if self.consider_as_one_hot:
self.embedding_dim = self.num_embeddings - 1
else:
self.embedding_dim = embedding_dim
self.blank_id = blank_id
if self.consider_as_one_hot:
# deal with blank_id, the output should be embedding_dim-1 as we consider blank output as zeros one_hot vect
# padding_idx fix the idx row to zeros
self.Embedding = nn.Embedding(
self.num_embeddings,
self.embedding_dim,
padding_idx=self.blank_id,
)
one_hot = torch.eye(self.embedding_dim)
if self.blank_id + 1 != self.num_embeddings:
self.Embedding.weight.data[self.blank_id + 1 :] = one_hot[
self.blank_id :
]
if self.blank_id != 0:
self.Embedding.weight.data[: self.blank_id] = one_hot[
: self.blank_id
]
self.Embedding.weight.requires_grad = False
else:
self.Embedding = nn.Embedding(
self.num_embeddings, self.embedding_dim
)
def forward(self, x):
"""Returns the embedding of input tensor.
Arguments
---------
x : torch.Tensor
Input to embed.
"""
# pytorch embedding layer only accept long dtype
return self.Embedding(x.long())
| 3,928 | 33.165217 | 120 | py |
speechbrain | speechbrain-main/speechbrain/nnet/pooling.py | """Library implementing pooling.
Authors
* Titouan Parcollet 2020
* Mirco Ravanelli 2020
* Nauman Dawalatabad 2020
* Jianyuan Zhong 2020
* Sarthak Yadav 2022
"""
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class Pooling1d(nn.Module):
"""This function implements 1d pooling of the input tensor.
Arguments
---------
pool_type : str
It is the type of pooling function to use ('avg','max').
kernel_size : int
It is the kernel size that defines the pooling dimension.
For instance, kernel size=3 applies a 1D Pooling with a size=3.
input_dims : int
The count of dimensions expected in the input.
pool_axis : int
The axis where the pooling is applied.
stride : int
It is the stride size.
padding : int
It is the number of padding elements to apply.
dilation : int
Controls the dilation factor of pooling.
ceil_mode : bool
When True, will use ceil instead of floor to compute the output shape.
Example
-------
>>> pool = Pooling1d('max',3)
>>> inputs = torch.rand(10, 12, 40)
>>> output=pool(inputs)
>>> output.shape
torch.Size([10, 4, 40])
"""
def __init__(
self,
pool_type,
kernel_size,
input_dims=3,
pool_axis=1,
ceil_mode=False,
padding=0,
dilation=1,
stride=None,
):
super().__init__()
self.pool_axis = pool_axis
if stride is None:
stride = kernel_size
if pool_type == "avg":
if input_dims == 3:
self.pool_layer = torch.nn.AvgPool1d(
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
)
elif input_dims == 4:
self.pool_layer = torch.nn.AvgPool2d(
(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
ceil_mode=ceil_mode,
)
else:
raise ValueError("input_dims must be 3 or 4")
elif pool_type == "max":
if input_dims == 3:
self.pool_layer = torch.nn.MaxPool1d(
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
elif input_dims == 4:
self.pool_layer = torch.nn.MaxPool2d(
(1, kernel_size),
stride=(1, stride),
padding=(0, padding),
dilation=(1, dilation),
ceil_mode=ceil_mode,
)
else:
raise ValueError("input_dims must be 3 or 4")
else:
raise ValueError("pool_type must be 'avg' or 'max'")
def forward(self, x):
"""Performs 1d pooling to the input tensor.
Arguments
---------
x : torch.Tensor
It represents a tensor for a mini-batch.
"""
# Put the pooling axes as the last dimension for torch.nn.pool
x = x.transpose(-1, self.pool_axis)
# Apply pooling
x = self.pool_layer(x)
# Recover input shape
x = x.transpose(-1, self.pool_axis)
return x
class Pooling2d(nn.Module):
"""This function implements 2d pooling of the input tensor.
Arguments
---------
pool_type : str
It is the type of pooling function to use ('avg','max').
pool_axis : tuple
It is a list containing the axis that will be considered
during pooling.
kernel_size : int
It is the kernel size that defines the pooling dimension.
For instance, kernel size=3,3 performs a 2D Pooling with a 3x3 kernel.
stride : int
It is the stride size.
padding : int
It is the number of padding elements to apply.
dilation : int
Controls the dilation factor of pooling.
ceil_mode : bool
When True, will use ceil instead of floor to compute the output shape.
Example
-------
>>> pool = Pooling2d('max',(5,3))
>>> inputs = torch.rand(10, 15, 12)
>>> output=pool(inputs)
>>> output.shape
torch.Size([10, 3, 4])
"""
def __init__(
self,
pool_type,
kernel_size,
pool_axis=(1, 2),
ceil_mode=False,
padding=0,
dilation=1,
stride=None,
):
super().__init__()
self.pool_type = pool_type
self.kernel_size = kernel_size
self.pool_axis = pool_axis
self.ceil_mode = ceil_mode
self.padding = padding
self.dilation = dilation
if stride is None:
self.stride = kernel_size
else:
self.stride = stride
if self.pool_type == "avg":
self.pool_layer = torch.nn.AvgPool2d(
self.kernel_size,
stride=self.stride,
padding=self.padding,
ceil_mode=self.ceil_mode,
)
else:
self.pool_layer = torch.nn.MaxPool2d(
self.kernel_size,
stride=self.stride,
padding=self.padding,
ceil_mode=self.ceil_mode,
)
def forward(self, x):
"""Performs 2d pooling to the input tensor.
Arguments
---------
x : torch.Tensor
It represents a tensor for a mini-batch.
"""
# Add extra two dimension at the last two, and then swap the pool_axis to them
# Example: pool_axis=[1,2]
# [a,b,c,d] => [a,b,c,d,1,1]
# [a,b,c,d,1,1] => [a,1,c,d,b,1]
# [a,1,c,d,b,1] => [a,1,1,d,b,c]
# [a,1,1,d,b,c] => [a,d,b,c]
x = (
x.unsqueeze(-1)
.unsqueeze(-1)
.transpose(-2, self.pool_axis[0])
.transpose(-1, self.pool_axis[1])
.squeeze(self.pool_axis[1])
.squeeze(self.pool_axis[0])
)
# Apply pooling
x = self.pool_layer(x)
# Swap back the pool_axis from the last two dimension
# Example: pool_axis=[1,2]
# [a,d,b,c] => [a,1,d,b,c]
# [a,1,d,b,c] => [a,1,1,d,b,c]
# [a,1,1,d,b,c] => [a,b,1,d,1,c]
# [a,b,1,d,1,c] => [a,b,c,d,1,1]
# [a,b,c,d,1,1] => [a,b,c,d]
x = (
x.unsqueeze(self.pool_axis[0])
.unsqueeze(self.pool_axis[1])
.transpose(-2, self.pool_axis[0])
.transpose(-1, self.pool_axis[1])
.squeeze(-1)
.squeeze(-1)
)
return x
class StatisticsPooling(nn.Module):
"""This class implements a statistic pooling layer.
It returns the mean and/or std of input tensor.
Arguments
---------
return_mean : True
If True, the average pooling will be returned.
return_std : True
If True, the standard deviation will be returned.
Example
-------
>>> inp_tensor = torch.rand([5, 100, 50])
>>> sp_layer = StatisticsPooling()
>>> out_tensor = sp_layer(inp_tensor)
>>> out_tensor.shape
torch.Size([5, 1, 100])
"""
def __init__(self, return_mean=True, return_std=True):
super().__init__()
# Small value for GaussNoise
self.eps = 1e-5
self.return_mean = return_mean
self.return_std = return_std
if not (self.return_mean or self.return_std):
raise ValueError(
"both of statistics are equal to False \n"
"consider enabling mean and/or std statistic pooling"
)
def forward(self, x, lengths=None):
"""Calculates mean and std for a batch (input tensor).
Arguments
---------
x : torch.Tensor
It represents a tensor for a mini-batch.
"""
if lengths is None:
if self.return_mean:
mean = x.mean(dim=1)
if self.return_std:
std = x.std(dim=1)
else:
mean = []
std = []
for snt_id in range(x.shape[0]):
# Avoiding padded time steps
actual_size = int(torch.round(lengths[snt_id] * x.shape[1]))
# computing statistics
if self.return_mean:
mean.append(
torch.mean(x[snt_id, 0:actual_size, ...], dim=0)
)
if self.return_std:
std.append(torch.std(x[snt_id, 0:actual_size, ...], dim=0))
if self.return_mean:
mean = torch.stack(mean)
if self.return_std:
std = torch.stack(std)
if self.return_mean:
gnoise = self._get_gauss_noise(mean.size(), device=mean.device)
gnoise = gnoise
mean += gnoise
if self.return_std:
std = std + self.eps
# Append mean and std of the batch
if self.return_mean and self.return_std:
pooled_stats = torch.cat((mean, std), dim=1)
pooled_stats = pooled_stats.unsqueeze(1)
elif self.return_mean:
pooled_stats = mean.unsqueeze(1)
elif self.return_std:
pooled_stats = std.unsqueeze(1)
return pooled_stats
def _get_gauss_noise(self, shape_of_tensor, device="cpu"):
"""Returns a tensor of epsilon Gaussian noise.
Arguments
---------
shape_of_tensor : tensor
It represents the size of tensor for generating Gaussian noise.
"""
gnoise = torch.randn(shape_of_tensor, device=device)
gnoise -= torch.min(gnoise)
gnoise /= torch.max(gnoise)
gnoise = self.eps * ((1 - 9) * gnoise + 9)
return gnoise
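def _example_statistics_pooling_with_lengths():
    """Illustrative only, not part of the original module: when relative lengths
    are provided, padded frames are excluded from the mean/std statistics."""
    pool = StatisticsPooling()
    x = torch.rand(2, 10, 4)
    lengths = torch.tensor([1.0, 0.5])  # second utterance uses only 5 frames
    stats = pool(x, lengths)
    return stats.shape  # torch.Size([2, 1, 8]): concatenated mean and std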
class AdaptivePool(nn.Module):
"""This class implements the adaptive average pooling.
Arguments
---------
output_size : int
The size of the output.
Example
-------
>>> pool = AdaptivePool(1)
>>> inp = torch.randn([8, 120, 40])
>>> output = pool(inp)
>>> output.shape
torch.Size([8, 1, 40])
"""
def __init__(self, output_size):
super().__init__()
condition = (
isinstance(output_size, int)
or isinstance(output_size, tuple)
or isinstance(output_size, list)
)
assert condition, "output size must be int, list or tuple"
if isinstance(output_size, tuple) or isinstance(output_size, list):
assert (
len(output_size) == 2
), "len of output size must not be greater than 2"
if isinstance(output_size, int):
self.pool = nn.AdaptiveAvgPool1d(output_size)
else:
self.pool = nn.AdaptiveAvgPool2d(output_size)
def forward(self, x):
"""Performs adpative pooling to the input tensor.
Arguments
---------
x : torch.Tensor
It represents a tensor for a mini-batch.
"""
if x.ndim == 3:
return self.pool(x.permute(0, 2, 1)).permute(0, 2, 1)
if x.ndim == 4:
return self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
class GaussianLowpassPooling(nn.Module):
"""
This class implements a learnable Gaussian lowpass pooling from
Neil Zeghidour, Olivier Teboul, Félix de Chaumont Quitry & Marco Tagliasacchi, "LEAF: A LEARNABLE FRONTEND
FOR AUDIO CLASSIFICATION", in Proc. of ICLR 2021 (https://arxiv.org/abs/2101.08596)
Arguments
---------
in_channels : int
The number of input channels.
kernel_size: int
Kernel size of the gaussian lowpass filters.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
padding : str
(same, valid). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
bias : bool
If True, the additive bias b is adopted.
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> inp_tensor = torch.rand([10, 8000, 40])
>>> low_pass_pooling = GaussianLowpassPooling(
... 40, kernel_size=401, stride=160,
... )
>>> # parameters corresponding to a window of 25 ms and stride 10 ms at 16 kHz
>>> out_tensor = low_pass_pooling(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 50, 40])
"""
def __init__(
self,
in_channels,
kernel_size,
stride=1,
initialization_constant=0.4,
padding="same",
padding_mode="constant",
bias=True,
skip_transpose=False,
):
super(GaussianLowpassPooling, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.padding_mode = padding_mode
self.in_channels = in_channels
self.skip_transpose = skip_transpose
self.weights = nn.Parameter(
torch.ones((1, 1, in_channels, 1)) * initialization_constant
)
if bias:
self._bias = torch.nn.Parameter(torch.ones(in_channels,))
else:
self._bias = None
def _get_impulse_responses(self, sigma):
filter_size = self.kernel_size
sigma = torch.clamp(sigma, min=(2.0 / filter_size), max=0.5)
t = torch.arange(0, filter_size, dtype=sigma.dtype, device=sigma.device)
t = torch.reshape(t, (1, filter_size, 1, 1))
numerator = t - 0.5 * (filter_size - 1)
denominator = sigma * 0.5 * (filter_size - 1)
return torch.exp(-0.5 * (numerator / denominator) ** 2)
def forward(self, x):
"""Performs GaussianLowpass Pooling.
Arguments
---------
x : torch.Tensor
3D tensor in input [batch,time,channels].
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
kernel = self._get_impulse_responses(self.weights)
kernel = kernel.reshape(-1, self.kernel_size, self.in_channels)
kernel = kernel.permute(2, 0, 1)
if self.padding == "same":
x = self._manage_padding(x, self.kernel_size)
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same' or 'valid'. Got " + self.padding
)
outputs = F.conv1d(
x,
kernel,
bias=self._bias,
stride=self.stride,
padding=0,
groups=self.in_channels,
)
if not self.skip_transpose:
outputs = outputs.transpose(1, -1)
return outputs
def _manage_padding(self, x, kernel_size):
# this is the logic that gives correct shape that complies
# with the original implementation at https://github.com/google-research/leaf-audio
def get_padding_value(kernel_size):
"""Get number of elements to pad."""
kernel_sizes = (kernel_size,)
from functools import reduce
from operator import __add__
conv_padding = reduce(
__add__,
[
(k // 2 + (k - 2 * (k // 2)) - 1, k // 2)
for k in kernel_sizes[::-1]
],
)
return conv_padding
pad_value = get_padding_value(kernel_size)
x = F.pad(x, pad_value, mode=self.padding_mode, value=0)
return x
| 16,006 | 29.316288 | 114 | py |
speechbrain | speechbrain-main/speechbrain/nnet/losses.py | """
Losses for training neural networks.
Authors
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Hwidong Na 2020
* Yan Gao 2020
* Titouan Parcollet 2020
"""
import math
import torch
import logging
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from itertools import permutations
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.decoders.ctc import filter_ctc_output
logger = logging.getLogger(__name__)
def transducer_loss(
logits,
targets,
input_lens,
target_lens,
blank_index,
reduction="mean",
use_torchaudio=True,
):
"""Transducer loss, see `speechbrain/nnet/loss/transducer_loss.py`.
Arguments
---------
logits : torch.Tensor
Predicted tensor, of shape [batch, maxT, maxU, num_labels].
targets : torch.Tensor
Target tensor, without any blanks, of shape [batch, target_len].
input_lens : torch.Tensor
Length of each utterance.
target_lens : torch.Tensor
Length of each target sequence.
blank_index : int
The location of the blank symbol among the label indices.
reduction : str
Specifies the reduction to apply to the output: 'mean' | 'batchmean' | 'sum'.
use_torchaudio: bool
If True, use Transducer loss implementation from torchaudio, otherwise,
use Speechbrain Numba implementation.
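    Example
    -------
    A minimal usage sketch with made-up shapes (maxU is assumed to be
    target_len + 1, and the lengths are relative fractions); it requires
    torchaudio >= 0.10.0 with the default ``use_torchaudio=True``:
    >>> logits = torch.randn(2, 10, 5, 6)  # [batch, maxT, maxU, num_labels]
    >>> targets = torch.randint(1, 6, (2, 4))  # [batch, target_len], no blanks
    >>> input_lens = torch.tensor([1.0, 0.8])
    >>> target_lens = torch.tensor([1.0, 0.75])
    >>> loss = transducer_loss(logits, targets, input_lens, target_lens, blank_index=0)  # doctest: +SKIP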
"""
input_lens = (input_lens * logits.shape[1]).round().int()
target_lens = (target_lens * targets.shape[1]).round().int()
if use_torchaudio:
try:
from torchaudio.functional import rnnt_loss
except ImportError:
err_msg = "The dependency torchaudio >= 0.10.0 is needed to use Transducer Loss\n"
err_msg += "Cannot import torchaudio.functional.rnnt_loss.\n"
err_msg += "To use it, please install torchaudio >= 0.10.0\n"
err_msg += "==================\n"
err_msg += "Otherwise, you can use our numba implementation, set `use_torchaudio=False`.\n"
raise ImportError(err_msg)
return rnnt_loss(
logits,
targets.int(),
input_lens,
target_lens,
blank=blank_index,
reduction=reduction,
)
else:
from speechbrain.nnet.loss.transducer_loss import Transducer
# Transducer.apply function take log_probs tensor.
log_probs = logits.log_softmax(-1)
return Transducer.apply(
log_probs, targets, input_lens, target_lens, blank_index, reduction,
)
class PitWrapper(nn.Module):
"""
Permutation Invariant Wrapper to allow Permutation Invariant Training
(PIT) with existing losses.
Permutation invariance is calculated over the sources/classes axis which is
assumed to be the rightmost dimension: predictions and targets tensors are
assumed to have shape [batch, ..., channels, sources].
Arguments
---------
base_loss : function
Base loss function, e.g. torch.nn.MSELoss. It is assumed that it takes
        two arguments (predictions and targets) and that no reduction is
        performed (if a pytorch loss is used, the user must specify
        reduction="none").
Returns
---------
pit_loss : torch.nn.Module
Torch module supporting forward method for PIT.
Example
-------
>>> pit_mse = PitWrapper(nn.MSELoss(reduction="none"))
>>> targets = torch.rand((2, 32, 4))
>>> p = (3, 0, 2, 1)
>>> predictions = targets[..., p]
>>> loss, opt_p = pit_mse(predictions, targets)
>>> loss
tensor([0., 0.])
"""
def __init__(self, base_loss):
super(PitWrapper, self).__init__()
self.base_loss = base_loss
def _fast_pit(self, loss_mat):
"""
Arguments
----------
loss_mat : torch.Tensor
            Tensor of shape [sources, sources] containing loss values for each
possible permutation of predictions.
Returns
-------
loss : torch.Tensor
Permutation invariant loss for the current batch, tensor of shape [1]
assigned_perm : tuple
Indexes for optimal permutation of the input over sources which
minimizes the loss.
"""
loss = None
assigned_perm = None
for p in permutations(range(loss_mat.shape[0])):
c_loss = loss_mat[range(loss_mat.shape[0]), p].mean()
if loss is None or loss > c_loss:
loss = c_loss
assigned_perm = p
return loss, assigned_perm
def _opt_perm_loss(self, pred, target):
"""
Arguments
---------
pred : torch.Tensor
Network prediction for the current example, tensor of
shape [..., sources].
target : torch.Tensor
Target for the current example, tensor of shape [..., sources].
Returns
-------
loss : torch.Tensor
Permutation invariant loss for the current example, tensor of shape [1]
assigned_perm : tuple
Indexes for optimal permutation of the input over sources which
minimizes the loss.
"""
n_sources = pred.size(-1)
pred = pred.unsqueeze(-2).repeat(
*[1 for x in range(len(pred.shape) - 1)], n_sources, 1
)
target = target.unsqueeze(-1).repeat(
1, *[1 for x in range(len(target.shape) - 1)], n_sources
)
loss_mat = self.base_loss(pred, target)
assert (
len(loss_mat.shape) >= 2
), "Base loss should not perform any reduction operation"
mean_over = [x for x in range(len(loss_mat.shape))]
loss_mat = loss_mat.mean(dim=mean_over[:-2])
return self._fast_pit(loss_mat)
def reorder_tensor(self, tensor, p):
"""
Arguments
---------
tensor : torch.Tensor
Tensor to reorder given the optimal permutation, of shape
[batch, ..., sources].
p : list of tuples
List of optimal permutations, e.g. for batch=2 and n_sources=3
            [(0, 1, 2), (0, 2, 1)].
Returns
-------
reordered : torch.Tensor
Reordered tensor given permutation p.
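        Example
        -------
        A small sketch; the permutations below stand in for the ``perms``
        returned by a previous forward pass:
        >>> pit_mse = PitWrapper(nn.MSELoss(reduction="none"))
        >>> tensor = torch.rand(2, 32, 3)
        >>> p = [(1, 2, 0), (0, 2, 1)]
        >>> reordered = pit_mse.reorder_tensor(tensor, p)
        >>> reordered.shape
        torch.Size([2, 32, 3])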
"""
reordered = torch.zeros_like(tensor, device=tensor.device)
for b in range(tensor.shape[0]):
reordered[b] = tensor[b][..., p[b]].clone()
return reordered
def forward(self, preds, targets):
"""
Arguments
---------
preds : torch.Tensor
Network predictions tensor, of shape
[batch, channels, ..., sources].
targets : torch.Tensor
Target tensor, of shape [batch, channels, ..., sources].
Returns
-------
loss : torch.Tensor
Permutation invariant loss for current examples, tensor of
shape [batch]
perms : list
List of indexes for optimal permutation of the inputs over
sources.
e.g., [(0, 1, 2), (2, 1, 0)] for three sources and 2 examples
per batch.
"""
losses = []
perms = []
for pred, label in zip(preds, targets):
loss, p = self._opt_perm_loss(pred, label)
perms.append(p)
losses.append(loss)
loss = torch.stack(losses)
return loss, perms
def ctc_loss(
log_probs, targets, input_lens, target_lens, blank_index, reduction="mean"
):
"""CTC loss.
Arguments
---------
    log_probs : torch.Tensor
Predicted tensor, of shape [batch, time, chars].
targets : torch.Tensor
Target tensor, without any blanks, of shape [batch, target_len]
input_lens : torch.Tensor
Length of each utterance.
target_lens : torch.Tensor
Length of each target sequence.
blank_index : int
The location of the blank symbol among the character indexes.
reduction : str
What reduction to apply to the output. 'mean', 'sum', 'batch',
'batchmean', 'none'.
See pytorch for 'mean', 'sum', 'none'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
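    Example
    -------
    A shape-only sketch with random log-probabilities (lengths are given as
    relative fractions):
    >>> log_probs = torch.randn(2, 20, 5).log_softmax(-1)
    >>> targets = torch.randint(1, 5, (2, 6))
    >>> input_lens = torch.tensor([1.0, 0.9])
    >>> target_lens = torch.tensor([1.0, 0.8])
    >>> loss = ctc_loss(log_probs, targets, input_lens, target_lens, blank_index=0)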
"""
input_lens = (input_lens * log_probs.shape[1]).round().int()
target_lens = (target_lens * targets.shape[1]).round().int()
log_probs = log_probs.transpose(0, 1)
if reduction == "batchmean":
reduction_loss = "sum"
elif reduction == "batch":
reduction_loss = "none"
else:
reduction_loss = reduction
loss = torch.nn.functional.ctc_loss(
log_probs,
targets,
input_lens,
target_lens,
blank_index,
zero_infinity=True,
reduction=reduction_loss,
)
if reduction == "batchmean":
return loss / targets.shape[0]
elif reduction == "batch":
N = loss.size(0)
return loss.view(N, -1).sum(1) / target_lens.view(N, -1).sum(1)
else:
return loss
def l1_loss(
predictions, targets, length=None, allowed_len_diff=3, reduction="mean"
):
"""Compute the true l1 loss, accounting for length differences.
Arguments
---------
predictions : torch.Tensor
Predicted tensor, of shape ``[batch, time, *]``.
targets : torch.Tensor
Target tensor with the same size as predicted tensor.
length : torch.Tensor
Length of each utterance for computing true error with a mask.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction : str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> probs = torch.tensor([[0.9, 0.1, 0.1, 0.9]])
>>> l1_loss(probs, torch.tensor([[1., 0., 0., 1.]]))
tensor(0.1000)
"""
predictions, targets = truncate(predictions, targets, allowed_len_diff)
loss = functools.partial(torch.nn.functional.l1_loss, reduction="none")
return compute_masked_loss(
loss, predictions, targets, length, reduction=reduction
)
def mse_loss(
predictions, targets, length=None, allowed_len_diff=3, reduction="mean"
):
"""Compute the true mean squared error, accounting for length differences.
Arguments
---------
predictions : torch.Tensor
Predicted tensor, of shape ``[batch, time, *]``.
targets : torch.Tensor
Target tensor with the same size as predicted tensor.
length : torch.Tensor
Length of each utterance for computing true error with a mask.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction : str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> probs = torch.tensor([[0.9, 0.1, 0.1, 0.9]])
>>> mse_loss(probs, torch.tensor([[1., 0., 0., 1.]]))
tensor(0.0100)
"""
predictions, targets = truncate(predictions, targets, allowed_len_diff)
loss = functools.partial(torch.nn.functional.mse_loss, reduction="none")
return compute_masked_loss(
loss, predictions, targets, length, reduction=reduction
)
def classification_error(
probabilities, targets, length=None, allowed_len_diff=3, reduction="mean"
):
"""Computes the classification error at frame or batch level.
Arguments
---------
probabilities : torch.Tensor
The posterior probabilities of shape
[batch, prob] or [batch, frames, prob]
targets : torch.Tensor
The targets, of shape [batch] or [batch, frames]
length : torch.Tensor
Length of each utterance, if frame-level loss is desired.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction : str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> probs = torch.tensor([[[0.9, 0.1], [0.1, 0.9]]])
>>> classification_error(probs, torch.tensor([1, 1]))
tensor(0.5000)
"""
if len(probabilities.shape) == 3 and len(targets.shape) == 2:
probabilities, targets = truncate(
probabilities, targets, allowed_len_diff
)
def error(predictions, targets):
"""Computes the classification error."""
predictions = torch.argmax(probabilities, dim=-1)
return (predictions != targets).float()
return compute_masked_loss(
error, probabilities, targets.long(), length, reduction=reduction
)
def nll_loss(
log_probabilities,
targets,
length=None,
label_smoothing=0.0,
allowed_len_diff=3,
reduction="mean",
):
"""Computes negative log likelihood loss.
Arguments
---------
log_probabilities : torch.Tensor
The probabilities after log has been applied.
Format is [batch, log_p] or [batch, frames, log_p].
targets : torch.Tensor
The targets, of shape [batch] or [batch, frames].
length : torch.Tensor
Length of each utterance, if frame-level loss is desired.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction : str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
>>> nll_loss(torch.log(probs), torch.tensor([1, 1]))
tensor(1.2040)
"""
if len(log_probabilities.shape) == 3:
log_probabilities, targets = truncate(
log_probabilities, targets, allowed_len_diff
)
log_probabilities = log_probabilities.transpose(1, -1)
# Pass the loss function but apply reduction="none" first
loss = functools.partial(torch.nn.functional.nll_loss, reduction="none")
return compute_masked_loss(
loss,
log_probabilities,
targets.long(),
length,
label_smoothing=label_smoothing,
reduction=reduction,
)
def bce_loss(
inputs,
targets,
length=None,
weight=None,
pos_weight=None,
reduction="mean",
allowed_len_diff=3,
label_smoothing=0.0,
):
"""Computes binary cross-entropy (BCE) loss. It also applies the sigmoid
function directly (this improves the numerical stability).
Arguments
---------
inputs : torch.Tensor
        The output before applying the final sigmoid
Format is [batch[, 1]?] or [batch, frames[, 1]?].
(Works with or without a singleton dimension at the end).
targets : torch.Tensor
The targets, of shape [batch] or [batch, frames].
length : torch.Tensor
Length of each utterance, if frame-level loss is desired.
weight : torch.Tensor
        A manual rescaling weight; if provided, it is repeated to match the
        input tensor shape.
pos_weight : torch.Tensor
A weight of positive examples. Must be a vector with length equal to
the number of classes.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction: str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> inputs = torch.tensor([10.0, -6.0])
>>> targets = torch.tensor([1, 0])
>>> bce_loss(inputs, targets)
tensor(0.0013)
"""
# Squeeze singleton dimension so inputs + targets match
if len(inputs.shape) == len(targets.shape) + 1:
inputs = inputs.squeeze(-1)
# Make sure tensor lengths match
if len(inputs.shape) >= 2:
inputs, targets = truncate(inputs, targets, allowed_len_diff)
elif length is not None:
raise ValueError("length can be passed only for >= 2D inputs.")
# Pass the loss function but apply reduction="none" first
loss = functools.partial(
torch.nn.functional.binary_cross_entropy_with_logits,
weight=weight,
pos_weight=pos_weight,
reduction="none",
)
return compute_masked_loss(
loss,
inputs,
targets.float(),
length,
label_smoothing=label_smoothing,
reduction=reduction,
)
def kldiv_loss(
log_probabilities,
targets,
length=None,
label_smoothing=0.0,
allowed_len_diff=3,
pad_idx=0,
reduction="mean",
):
"""Computes the KL-divergence error at the batch level.
This loss applies label smoothing directly to the targets
Arguments
---------
    log_probabilities : torch.Tensor
        The posterior log probabilities of shape
[batch, prob] or [batch, frames, prob].
targets : torch.Tensor
The targets, of shape [batch] or [batch, frames].
length : torch.Tensor
Length of each utterance, if frame-level loss is desired.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
reduction : str
Options are 'mean', 'batch', 'batchmean', 'sum'.
See pytorch for 'mean', 'sum'. The 'batch' option returns
one loss per item in the batch, 'batchmean' returns sum / batch size.
Example
-------
>>> probs = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
>>> kldiv_loss(torch.log(probs), torch.tensor([1, 1]))
tensor(1.2040)
"""
if label_smoothing > 0:
if log_probabilities.dim() == 2:
log_probabilities = log_probabilities.unsqueeze(1)
bz, time, n_class = log_probabilities.shape
targets = targets.long().detach()
confidence = 1 - label_smoothing
log_probabilities = log_probabilities.view(-1, n_class)
targets = targets.view(-1)
with torch.no_grad():
true_distribution = log_probabilities.clone()
true_distribution.fill_(label_smoothing / (n_class - 1))
ignore = targets == pad_idx
targets = targets.masked_fill(ignore, 0)
true_distribution.scatter_(1, targets.unsqueeze(1), confidence)
loss = torch.nn.functional.kl_div(
log_probabilities, true_distribution, reduction="none"
)
loss = loss.masked_fill(ignore.unsqueeze(1), 0)
# return loss according to reduction specified
if reduction == "mean":
return loss.sum().mean()
elif reduction == "batchmean":
return loss.sum() / bz
elif reduction == "batch":
return loss.view(bz, -1).sum(1) / length
elif reduction == "sum":
return loss.sum()
else:
return loss
else:
return nll_loss(log_probabilities, targets, length, reduction=reduction)
def truncate(predictions, targets, allowed_len_diff=3):
"""Ensure that predictions and targets are the same length.
Arguments
---------
predictions : torch.Tensor
First tensor for checking length.
targets : torch.Tensor
Second tensor for checking length.
allowed_len_diff : int
Length difference that will be tolerated before raising an exception.
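    Example
    -------
    >>> predictions = torch.zeros(2, 10, 5)
    >>> targets = torch.zeros(2, 8, 5)
    >>> predictions, targets = truncate(predictions, targets)
    >>> predictions.shape, targets.shape
    (torch.Size([2, 8, 5]), torch.Size([2, 8, 5]))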
"""
len_diff = predictions.shape[1] - targets.shape[1]
if len_diff == 0:
return predictions, targets
elif abs(len_diff) > allowed_len_diff:
raise ValueError(
"Predictions and targets should be same length, but got %s and "
"%s respectively." % (predictions.shape[1], targets.shape[1])
)
elif len_diff < 0:
return predictions, targets[:, : predictions.shape[1]]
else:
return predictions[:, : targets.shape[1]], targets
def compute_masked_loss(
loss_fn,
predictions,
targets,
length=None,
label_smoothing=0.0,
reduction="mean",
):
"""Compute the true average loss of a set of waveforms of unequal length.
Arguments
---------
loss_fn : function
A function for computing the loss taking just predictions and targets.
Should return all the losses, not a reduction (e.g. reduction="none").
predictions : torch.Tensor
First argument to loss function.
targets : torch.Tensor
Second argument to loss function.
length : torch.Tensor
Length of each utterance to compute mask. If None, global average is
computed and returned.
label_smoothing: float
The proportion of label smoothing. Should only be used for NLL loss.
Ref: Regularizing Neural Networks by Penalizing Confident Output
Distributions. https://arxiv.org/abs/1701.06548
reduction : str
One of 'mean', 'batch', 'batchmean', 'none' where 'mean' returns a
single value and 'batch' returns one per item in the batch and
'batchmean' is sum / batch_size and 'none' returns all.
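    Example
    -------
    A minimal sketch that reuses the l1 base loss (shapes are illustrative):
    >>> loss_fn = functools.partial(torch.nn.functional.l1_loss, reduction="none")
    >>> predictions = torch.zeros(2, 4)
    >>> targets = torch.ones(2, 4)
    >>> length = torch.tensor([1.0, 0.5])
    >>> compute_masked_loss(loss_fn, predictions, targets, length)
    tensor(1.)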
"""
mask = torch.ones_like(targets)
if length is not None:
length_mask = length_to_mask(
length * targets.shape[1], max_len=targets.shape[1],
)
# Handle any dimensionality of input
while len(length_mask.shape) < len(mask.shape):
length_mask = length_mask.unsqueeze(-1)
length_mask = length_mask.type(mask.dtype)
mask *= length_mask
# Compute, then reduce loss
loss = loss_fn(predictions, targets) * mask
N = loss.size(0)
if reduction == "mean":
loss = loss.sum() / torch.sum(mask)
elif reduction == "batchmean":
loss = loss.sum() / N
elif reduction == "batch":
loss = loss.reshape(N, -1).sum(1) / mask.reshape(N, -1).sum(1)
if label_smoothing == 0:
return loss
else:
loss_reg = torch.mean(predictions, dim=1) * mask
if reduction == "mean":
loss_reg = torch.sum(loss_reg) / torch.sum(mask)
elif reduction == "batchmean":
loss_reg = torch.sum(loss_reg) / targets.shape[0]
elif reduction == "batch":
loss_reg = loss_reg.sum(1) / mask.sum(1)
return -label_smoothing * loss_reg + (1 - label_smoothing) * loss
def get_si_snr_with_pitwrapper(source, estimate_source):
"""This function wraps si_snr calculation with the speechbrain pit-wrapper.
Arguments:
---------
source: [B, T, C],
        Where B is the batch size, T is the length of the sources, and C is
        the number of sources. The ordering is made so that this loss is
        compatible with the class PitWrapper.
estimate_source: [B, T, C]
The estimated source.
Example:
---------
>>> x = torch.arange(600).reshape(3, 100, 2)
>>> xhat = x[:, :, (1, 0)]
>>> si_snr = -get_si_snr_with_pitwrapper(x, xhat)
>>> print(si_snr)
tensor([135.2284, 135.2284, 135.2284])
"""
pit_si_snr = PitWrapper(cal_si_snr)
loss, perms = pit_si_snr(source, estimate_source)
return loss
def get_snr_with_pitwrapper(source, estimate_source):
"""This function wraps si_snr calculation with the speechbrain pit-wrapper.
Arguments:
---------
source: [B, T, E, C],
        Where B is the batch size, T is the length of the sources, E is the number of binaural
        channels, and C is the number of sources. The ordering is made so that this loss is
        compatible with the class PitWrapper.
estimate_source: [B, T, E, C]
The estimated source.
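    Example:
    ---------
    A shape-only sketch with random binaural mixtures:
    >>> x = torch.randn(2, 100, 2, 3)
    >>> xhat = x[:, :, :, (2, 0, 1)]
    >>> loss = get_snr_with_pitwrapper(x, xhat)
    >>> loss.shape
    torch.Size([2])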
"""
pit_snr = PitWrapper(cal_snr)
loss, perms = pit_snr(source, estimate_source)
return loss
def cal_si_snr(source, estimate_source):
"""Calculate SI-SNR.
Arguments:
---------
source: [T, B, C],
        Where B is the batch size, T is the length of the sources, and C is the number of sources.
        The ordering is made so that this loss is compatible with the class PitWrapper.
estimate_source: [T, B, C]
The estimated source.
Example:
---------
>>> import numpy as np
>>> x = torch.Tensor([[1, 0], [123, 45], [34, 5], [2312, 421]])
>>> xhat = x[:, (1, 0)]
>>> x = x.unsqueeze(-1).repeat(1, 1, 2)
>>> xhat = xhat.unsqueeze(1).repeat(1, 2, 1)
>>> si_snr = -cal_si_snr(x, xhat)
>>> print(si_snr)
tensor([[[ 25.2142, 144.1789],
[130.9283, 25.2142]]])
"""
EPS = 1e-8
assert source.size() == estimate_source.size()
device = estimate_source.device.type
source_lengths = torch.tensor(
[estimate_source.shape[0]] * estimate_source.shape[-2], device=device
)
mask = get_mask(source, source_lengths)
estimate_source *= mask
num_samples = (
source_lengths.contiguous().reshape(1, -1, 1).float()
) # [1, B, 1]
mean_target = torch.sum(source, dim=0, keepdim=True) / num_samples
mean_estimate = (
torch.sum(estimate_source, dim=0, keepdim=True) / num_samples
)
zero_mean_target = source - mean_target
zero_mean_estimate = estimate_source - mean_estimate
# mask padding position along T
zero_mean_target *= mask
zero_mean_estimate *= mask
# Step 2. SI-SNR with PIT
# reshape to use broadcast
s_target = zero_mean_target # [T, B, C]
s_estimate = zero_mean_estimate # [T, B, C]
# s_target = <s', s>s / ||s||^2
dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True) # [1, B, C]
s_target_energy = (
torch.sum(s_target ** 2, dim=0, keepdim=True) + EPS
) # [1, B, C]
proj = dot * s_target / s_target_energy # [T, B, C]
# e_noise = s' - s_target
e_noise = s_estimate - proj # [T, B, C]
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (
torch.sum(e_noise ** 2, dim=0) + EPS
)
si_snr = 10 * torch.log10(si_snr_beforelog + EPS) # [B, C]
return -si_snr.unsqueeze(0)
def cal_snr(source, estimate_source):
"""Calculate binaural channel SNR.
Arguments:
---------
source: [T, E, B, C],
        Where B is the batch size, T is the length of the sources, E is the number of binaural
        channels, and C is the number of sources. The ordering is made so that this loss is
        compatible with the class PitWrapper.
estimate_source: [T, E, B, C]
The estimated source.
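    Example:
    ---------
    A shape-only sketch with random tensors:
    >>> source = torch.randn(100, 2, 4, 3)
    >>> estimate = source + 0.1 * torch.randn_like(source)
    >>> snr = -cal_snr(source, estimate)
    >>> snr.shape
    torch.Size([1, 2, 4, 3])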
"""
EPS = 1e-8
assert source.size() == estimate_source.size()
device = estimate_source.device.type
source_lengths = torch.tensor(
[estimate_source.shape[0]] * estimate_source.shape[-2], device=device
)
mask = get_mask(source, source_lengths) # [T, E, 1]
estimate_source *= mask
num_samples = (
source_lengths.contiguous().reshape(1, -1, 1).float()
) # [1, B, 1]
mean_target = torch.sum(source, dim=0, keepdim=True) / num_samples
mean_estimate = (
torch.sum(estimate_source, dim=0, keepdim=True) / num_samples
)
zero_mean_target = source - mean_target
zero_mean_estimate = estimate_source - mean_estimate
# mask padding position along T
zero_mean_target *= mask
zero_mean_estimate *= mask
# Step 2. SNR with PIT
# reshape to use broadcast
s_target = zero_mean_target # [T, E, B, C]
s_estimate = zero_mean_estimate # [T, E, B, C]
# SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
# n_dim = [x for x in range(len(s_target.shape)-2)]
snr_beforelog = torch.sum(s_target ** 2, dim=0) / (
torch.sum((s_estimate - s_target) ** 2, dim=0) + EPS
)
snr = 10 * torch.log10(snr_beforelog + EPS) # [B, C]
return -snr.unsqueeze(0)
def get_mask(source, source_lengths):
"""
Arguments
---------
source : [T, B, C]
source_lengths : [B]
Returns
-------
mask : [T, B, 1]
Example:
---------
>>> source = torch.randn(4, 3, 2)
>>> source_lengths = torch.Tensor([2, 1, 4]).int()
>>> mask = get_mask(source, source_lengths)
>>> print(mask)
tensor([[[1.],
[1.],
[1.]],
<BLANKLINE>
[[1.],
[0.],
[1.]],
<BLANKLINE>
[[0.],
[0.],
[1.]],
<BLANKLINE>
[[0.],
[0.],
[1.]]])
"""
mask = source.new_ones(source.size()[:-1]).unsqueeze(-1).transpose(1, -2)
B = source.size(-2)
for i in range(B):
mask[source_lengths[i] :, i] = 0
return mask.transpose(-2, 1)
class AngularMargin(nn.Module):
"""
An implementation of Angular Margin (AM) proposed in the following
paper: '''Margin Matters: Towards More Discriminative Deep Neural Network
Embeddings for Speaker Recognition''' (https://arxiv.org/abs/1906.07317)
Arguments
---------
margin : float
        The margin for cosine similarity.
    scale : float
        The scale for cosine similarity.
Return
---------
predictions : torch.Tensor
Example
-------
>>> pred = AngularMargin()
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] ])
>>> predictions = pred(outputs, targets)
>>> predictions[:,0] > predictions[:,1]
tensor([ True, False, True, False])
"""
def __init__(self, margin=0.0, scale=1.0):
super(AngularMargin, self).__init__()
self.margin = margin
self.scale = scale
def forward(self, outputs, targets):
"""Compute AM between two tensors
Arguments
---------
outputs : torch.Tensor
The outputs of shape [N, C], cosine similarity is required.
targets : torch.Tensor
            The targets of shape [N, C], to which the margin is applied.
Return
---------
predictions : torch.Tensor
"""
outputs = outputs - self.margin * targets
return self.scale * outputs
class AdditiveAngularMargin(AngularMargin):
"""
An implementation of Additive Angular Margin (AAM) proposed
in the following paper: '''Margin Matters: Towards More Discriminative Deep
Neural Network Embeddings for Speaker Recognition'''
(https://arxiv.org/abs/1906.07317)
Arguments
---------
margin : float
        The margin for cosine similarity.
    scale: float
        The scale for cosine similarity.
Returns
-------
predictions : torch.Tensor
Tensor.
Example
-------
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] ])
>>> pred = AdditiveAngularMargin()
>>> predictions = pred(outputs, targets)
>>> predictions[:,0] > predictions[:,1]
tensor([ True, False, True, False])
"""
def __init__(self, margin=0.0, scale=1.0, easy_margin=False):
super(AdditiveAngularMargin, self).__init__(margin, scale)
self.easy_margin = easy_margin
self.cos_m = math.cos(self.margin)
self.sin_m = math.sin(self.margin)
self.th = math.cos(math.pi - self.margin)
self.mm = math.sin(math.pi - self.margin) * self.margin
def forward(self, outputs, targets):
"""
Compute AAM between two tensors
Arguments
---------
outputs : torch.Tensor
The outputs of shape [N, C], cosine similarity is required.
targets : torch.Tensor
            The targets of shape [N, C], to which the margin is applied.
Return
---------
predictions : torch.Tensor
"""
cosine = outputs.float()
cosine = torch.clamp(cosine, -1 + 1e-7, 1 - 1e-7)
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
outputs = (targets * phi) + ((1.0 - targets) * cosine)
return self.scale * outputs
class LogSoftmaxWrapper(nn.Module):
"""
    Arguments
    ---------
    loss_fn : torch.nn.Module
        The loss function (e.g., AngularMargin) applied to the outputs
        before the log-softmax and the KL-divergence computation.
Returns
---------
loss : torch.Tensor
Learning loss
predictions : torch.Tensor
Log probabilities
Example
-------
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> outputs = outputs.unsqueeze(1)
>>> targets = torch.tensor([ [0], [1], [0], [1] ])
>>> log_prob = LogSoftmaxWrapper(nn.Identity())
>>> loss = log_prob(outputs, targets)
>>> 0 <= loss < 1
tensor(True)
>>> log_prob = LogSoftmaxWrapper(AngularMargin(margin=0.2, scale=32))
>>> loss = log_prob(outputs, targets)
>>> 0 <= loss < 1
tensor(True)
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> log_prob = LogSoftmaxWrapper(AdditiveAngularMargin(margin=0.3, scale=32))
>>> loss = log_prob(outputs, targets)
>>> 0 <= loss < 1
tensor(True)
"""
def __init__(self, loss_fn):
super(LogSoftmaxWrapper, self).__init__()
self.loss_fn = loss_fn
self.criterion = torch.nn.KLDivLoss(reduction="sum")
def forward(self, outputs, targets, length=None):
"""
Arguments
---------
outputs : torch.Tensor
Network output tensor, of shape
[batch, 1, outdim].
targets : torch.Tensor
Target tensor, of shape [batch, 1].
Returns
-------
loss: torch.Tensor
Loss for current examples.
"""
outputs = outputs.squeeze(1)
targets = targets.squeeze(1)
targets = F.one_hot(targets.long(), outputs.shape[1]).float()
try:
predictions = self.loss_fn(outputs, targets)
except TypeError:
predictions = self.loss_fn(outputs)
predictions = F.log_softmax(predictions, dim=1)
loss = self.criterion(predictions, targets) / targets.sum()
return loss
def ctc_loss_kd(log_probs, targets, input_lens, blank_index, device):
"""Knowledge distillation for CTC loss.
Reference
---------
Distilling Knowledge from Ensembles of Acoustic Models for Joint CTC-Attention End-to-End Speech Recognition.
https://arxiv.org/abs/2005.09310
Arguments
---------
log_probs : torch.Tensor
Predicted tensor from student model, of shape [batch, time, chars].
targets : torch.Tensor
Predicted tensor from single teacher model, of shape [batch, time, chars].
input_lens : torch.Tensor
Length of each utterance.
blank_index : int
The location of the blank symbol among the character indexes.
device : str
Device for computing.
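    Example
    -------
    A rough sketch with random student/teacher posteriors (shapes are
    illustrative only):
    >>> log_probs = torch.randn(2, 20, 5).log_softmax(-1)
    >>> targets = torch.randn(2, 20, 5).softmax(-1)
    >>> input_lens = torch.tensor([1.0, 0.9])
    >>> loss = ctc_loss_kd(log_probs, targets, input_lens, blank_index=0, device="cpu")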
"""
scores, predictions = torch.max(targets, dim=-1)
pred_list = []
pred_len_list = []
for j in range(predictions.shape[0]):
# Getting current predictions
current_pred = predictions[j]
actual_size = (input_lens[j] * log_probs.shape[1]).round().int()
current_pred = current_pred[0:actual_size]
current_pred = filter_ctc_output(
list(current_pred.cpu().numpy()), blank_id=blank_index
)
current_pred_len = len(current_pred)
pred_list.append(current_pred)
pred_len_list.append(current_pred_len)
max_pred_len = max(pred_len_list)
for j in range(predictions.shape[0]):
diff = max_pred_len - pred_len_list[j]
for n in range(diff):
pred_list[j].append(0)
# generate soft label of teacher model
fake_lab = torch.from_numpy(np.array(pred_list))
    fake_lab = fake_lab.to(device)
    fake_lab = fake_lab.int()
    fake_lab_lengths = torch.from_numpy(np.array(pred_len_list)).int()
    fake_lab_lengths = fake_lab_lengths.to(device)
input_lens = (input_lens * log_probs.shape[1]).round().int()
log_probs = log_probs.transpose(0, 1)
return torch.nn.functional.ctc_loss(
log_probs,
fake_lab,
input_lens,
fake_lab_lengths,
blank_index,
zero_infinity=True,
)
def ce_kd(inp, target):
"""Simple version of distillation for cross-entropy loss.
Arguments
---------
inp : torch.Tensor
The probabilities from student model, of shape [batch_size * length, feature]
target : torch.Tensor
The probabilities from teacher model, of shape [batch_size * length, feature]
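    Example
    -------
    A shape-only sketch with random inputs:
    >>> inp = torch.randn(4, 10)
    >>> target = torch.softmax(torch.randn(4, 10), dim=-1)
    >>> ce_kd(inp, target).shape
    torch.Size([4])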
"""
return (-target * inp).sum(1)
def nll_loss_kd(
probabilities, targets, rel_lab_lengths,
):
"""Knowledge distillation for negative log-likelihood loss.
Reference
---------
Distilling Knowledge from Ensembles of Acoustic Models for Joint CTC-Attention End-to-End Speech Recognition.
https://arxiv.org/abs/2005.09310
Arguments
---------
probabilities : torch.Tensor
The predicted probabilities from the student model.
Format is [batch, frames, p]
targets : torch.Tensor
The target probabilities from the teacher model.
Format is [batch, frames, p]
rel_lab_lengths : torch.Tensor
Length of each utterance, if the frame-level loss is desired.
Example
-------
>>> probabilities = torch.tensor([[[0.8, 0.2], [0.2, 0.8]]])
>>> targets = torch.tensor([[[0.9, 0.1], [0.1, 0.9]]])
>>> rel_lab_lengths = torch.tensor([1.])
>>> nll_loss_kd(probabilities, targets, rel_lab_lengths)
tensor(-0.7400)
"""
# Getting the number of sentences in the minibatch
N_snt = probabilities.shape[0]
# Getting the maximum length of label sequence
max_len = probabilities.shape[1]
# Getting the label lengths
lab_lengths = torch.round(rel_lab_lengths * targets.shape[1]).int()
# Reshape to [batch_size * length, feature]
prob_curr = probabilities.reshape(N_snt * max_len, probabilities.shape[-1])
# Generating mask
mask = length_to_mask(
lab_lengths, max_len=max_len, dtype=torch.float, device=prob_curr.device
)
# Reshape to [batch_size * length, feature]
lab_curr = targets.reshape(N_snt * max_len, targets.shape[-1])
loss = ce_kd(prob_curr, lab_curr)
# Loss averaging
loss = torch.sum(loss.reshape(N_snt, max_len) * mask) / torch.sum(mask)
return loss
class ContrastiveLoss(nn.Module):
"""Contrastive loss as used in wav2vec2.
Reference
---------
wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations
https://arxiv.org/abs/2006.11477
Arguments
---------
logit_temp : torch.Float
        A temperature to divide the logits.
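    Example
    -------
    A rough usage sketch; the random ``negs`` below stand in for negatives
    drawn with the sample_negatives utility (see lobes/wav2vec2):
    >>> loss_fn = ContrastiveLoss(logit_temp=0.1)
    >>> x = torch.randn(2, 10, 16)
    >>> y = torch.randn(2, 10, 16)
    >>> negs = torch.randn(5, 2, 10, 16)
    >>> loss, accuracy = loss_fn(x, y, negs)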
"""
def __init__(self, logit_temp):
super().__init__()
self.logit_temp = logit_temp
def forward(self, x, y, negs):
"""
Arguments
----------
x : torch.Tensor
Encoded embeddings with shape (B, T, C).
y : torch.Tensor
Feature extractor target embeddings with shape (B, T, C).
negs : torch.Tensor
Negative embeddings from feature extractor with shape (N, B, T, C)
where N is number of negatives. Can be obtained with our sample_negatives
function (check in lobes/wav2vec2).
"""
neg_is_pos = (y == negs).all(-1)
y = y.unsqueeze(0)
target_and_negatives = torch.cat([y, negs], dim=0)
logits = torch.cosine_similarity(
x.float(), target_and_negatives.float(), dim=-1
).type_as(x)
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
# N, B, T -> T, B, N -> T*B, N
logits = logits.transpose(0, 2).reshape(-1, logits.size(0))
targets = torch.zeros(
(logits.size(0)), dtype=torch.long, device=logits.device
)
loss = F.cross_entropy(
logits / self.logit_temp, targets, reduction="sum"
)
accuracy = torch.sum(logits.argmax(-1) == 0) / (
logits.numel() / logits.size(-1)
)
return loss, accuracy
| 40,423 | 31.28754 | 117 | py |
speechbrain | speechbrain-main/speechbrain/nnet/CNN.py | """Library implementing convolutional neural networks.
Authors
* Mirco Ravanelli 2020
* Jianyuan Zhong 2020
* Cem Subakan 2021
* Davide Borra 2021
* Andreas Nautsch 2022
* Sarthak Yadav 2022
"""
import math
import torch
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
from typing import Tuple
from speechbrain.processing.signal_processing import (
gabor_impulse_response,
gabor_impulse_response_legacy_complex,
)
logger = logging.getLogger(__name__)
class SincConv(nn.Module):
"""This function implements SincConv (SincNet).
M. Ravanelli, Y. Bengio, "Speaker Recognition from raw waveform with
SincNet", in Proc. of SLT 2018 (https://arxiv.org/abs/1808.00158)
Arguments
---------
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
out_channels : int
It is the number of output channels.
kernel_size: int
Kernel size of the convolutional filters.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
"causal" results in causal (dilated) convolutions.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
groups : int
This option specifies the convolutional groups. See torch.nn
documentation for more information.
bias : bool
If True, the additive bias b is adopted.
sample_rate : int,
Sampling rate of the input signals. It is only used for sinc_conv.
min_low_hz : float
Lowest possible frequency (in Hz) for a filter. It is only used for
sinc_conv.
    min_band_hz : float
Lowest possible value (in Hz) for a filter bandwidth.
Example
-------
>>> inp_tensor = torch.rand([10, 16000])
>>> conv = SincConv(input_shape=inp_tensor.shape, out_channels=25, kernel_size=11)
>>> out_tensor = conv(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16000, 25])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=1,
dilation=1,
padding="same",
padding_mode="reflect",
sample_rate=16000,
min_low_hz=50,
min_band_hz=50,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.sample_rate = sample_rate
self.min_low_hz = min_low_hz
self.min_band_hz = min_band_hz
# input shape inference
if input_shape is None and self.in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if self.in_channels is None:
self.in_channels = self._check_input_shape(input_shape)
if self.out_channels % self.in_channels != 0:
raise ValueError(
"Number of output channels must be divisible by in_channels"
)
# Initialize Sinc filters
self._init_sinc_conv()
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
x = x.transpose(1, -1)
self.device = x.device
unsqueeze = x.ndim == 2
if unsqueeze:
x = x.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got %s."
% (self.padding)
)
sinc_filters = self._get_sinc_filters()
wx = F.conv1d(
x,
sinc_filters,
stride=self.stride,
padding=0,
dilation=self.dilation,
groups=self.in_channels,
)
if unsqueeze:
wx = wx.squeeze(1)
wx = wx.transpose(1, -1)
return wx
def _check_input_shape(self, shape):
"""Checks the input shape and returns the number of input channels."""
if len(shape) == 2:
in_channels = 1
elif len(shape) == 3:
in_channels = shape[-1]
else:
raise ValueError(
"sincconv expects 2d or 3d inputs. Got " + str(len(shape))
)
# Kernel size must be odd
if self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
return in_channels
def _get_sinc_filters(self,):
"""This functions creates the sinc-filters to used for sinc-conv."""
# Computing the low frequencies of the filters
low = self.min_low_hz + torch.abs(self.low_hz_)
# Setting minimum band and minimum freq
high = torch.clamp(
low + self.min_band_hz + torch.abs(self.band_hz_),
self.min_low_hz,
self.sample_rate / 2,
)
band = (high - low)[:, 0]
# Passing from n_ to the corresponding f_times_t domain
self.n_ = self.n_.to(self.device)
self.window_ = self.window_.to(self.device)
f_times_t_low = torch.matmul(low, self.n_)
f_times_t_high = torch.matmul(high, self.n_)
# Left part of the filters.
band_pass_left = (
(torch.sin(f_times_t_high) - torch.sin(f_times_t_low))
/ (self.n_ / 2)
) * self.window_
# Central element of the filter
band_pass_center = 2 * band.view(-1, 1)
# Right part of the filter (sinc filters are symmetric)
band_pass_right = torch.flip(band_pass_left, dims=[1])
# Combining left, central, and right part of the filter
band_pass = torch.cat(
[band_pass_left, band_pass_center, band_pass_right], dim=1
)
# Amplitude normalization
band_pass = band_pass / (2 * band[:, None])
# Setting up the filter coefficients
filters = band_pass.view(self.out_channels, 1, self.kernel_size)
return filters
def _init_sinc_conv(self):
"""Initializes the parameters of the sinc_conv layer."""
# Initialize filterbanks such that they are equally spaced in Mel scale
high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
mel = torch.linspace(
self._to_mel(self.min_low_hz),
self._to_mel(high_hz),
self.out_channels + 1,
)
hz = self._to_hz(mel)
# Filter lower frequency and bands
self.low_hz_ = hz[:-1].unsqueeze(1)
self.band_hz_ = (hz[1:] - hz[:-1]).unsqueeze(1)
        # Making freq and bands learnable
self.low_hz_ = nn.Parameter(self.low_hz_)
self.band_hz_ = nn.Parameter(self.band_hz_)
# Hamming window
n_lin = torch.linspace(
0, (self.kernel_size / 2) - 1, steps=int((self.kernel_size / 2))
)
self.window_ = 0.54 - 0.46 * torch.cos(
2 * math.pi * n_lin / self.kernel_size
)
# Time axis (only half is needed due to symmetry)
n = (self.kernel_size - 1) / 2.0
self.n_ = (
2 * math.pi * torch.arange(-n, 0).view(1, -1) / self.sample_rate
)
def _to_mel(self, hz):
"""Converts frequency in Hz to the mel scale."""
return 2595 * np.log10(1 + hz / 700)
def _to_hz(self, mel):
"""Converts frequency in the mel scale to Hz."""
return 700 * (10 ** (mel / 2595) - 1)
def _manage_padding(
self, x, kernel_size: int, dilation: int, stride: int,
):
"""This function performs zero-padding on the time axis
        such that the input length is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Size of kernel.
dilation : int
Dilation used.
stride : int
Stride.
"""
# Detecting input shape
L_in = self.in_channels
# Time padding
padding = get_padding_elem(L_in, stride, kernel_size, dilation)
# Applying padding
x = F.pad(x, padding, mode=self.padding_mode)
return x
class Conv1d(nn.Module):
"""This function implements 1d convolution.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
"causal" results in causal (dilated) convolutions.
groups: int
Number of blocked connections from input channels to output channels.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
weight_norm : bool
If True, use weight normalization,
to be removed with self.remove_weight_norm() at inference
default_padding: str or int
This sets the default padding mode that will be used by the pytorch Conv1d backend.
Example
-------
>>> inp_tensor = torch.rand([10, 40, 16])
>>> cnn_1d = Conv1d(
... input_shape=inp_tensor.shape, out_channels=8, kernel_size=5
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 40, 8])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
skip_transpose=False,
weight_norm=False,
conv_init=None,
default_padding=0,
):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.unsqueeze = False
self.skip_transpose = skip_transpose
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input_shape(input_shape)
self.in_channels = in_channels
self.conv = nn.Conv1d(
in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
dilation=self.dilation,
padding=default_padding,
groups=groups,
bias=bias,
)
if conv_init == "kaiming":
nn.init.kaiming_normal_(self.conv.weight)
if weight_norm:
self.conv = nn.utils.weight_norm(self.conv)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
if self.unsqueeze:
x = x.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got "
+ self.padding
)
wx = self.conv(x)
if self.unsqueeze:
wx = wx.squeeze(1)
if not self.skip_transpose:
wx = wx.transpose(1, -1)
return wx
def _manage_padding(
self, x, kernel_size: int, dilation: int, stride: int,
):
"""This function performs zero-padding on the time axis
        such that the input length is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Size of kernel.
dilation : int
Dilation used.
stride : int
Stride.
"""
# Detecting input shape
L_in = self.in_channels
# Time padding
padding = get_padding_elem(L_in, stride, kernel_size, dilation)
# Applying padding
x = F.pad(x, padding, mode=self.padding_mode)
return x
def _check_input_shape(self, shape):
"""Checks the input shape and returns the number of input channels."""
if len(shape) == 2:
self.unsqueeze = True
in_channels = 1
elif self.skip_transpose:
in_channels = shape[1]
elif len(shape) == 3:
in_channels = shape[2]
else:
raise ValueError(
"conv1d expects 2d, 3d inputs. Got " + str(len(shape))
)
# Kernel size must be odd
if not self.padding == "valid" and self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
return in_channels
def remove_weight_norm(self):
"""Removes weight normalization at inference if used during training."""
self.conv = nn.utils.remove_weight_norm(self.conv)
class Conv2d(nn.Module):
"""This function implements 2d convolution.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : tuple
Kernel size of the 2d convolutional filters over time and frequency
axis.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride: int
Stride factor of the 2d convolutional filters over time and frequency
axis.
dilation : int
Dilation factor of the 2d convolutional filters over time and
frequency axis.
padding : str
(same, valid, causal).
If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
If "causal" then proper padding is inserted to simulate causal convolution on the first spatial dimension.
(spatial dim 1 is dim 3 for both skip_transpose=False and skip_transpose=True)
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
groups : int
This option specifies the convolutional groups. See torch.nn
documentation for more information.
bias : bool
If True, the additive bias b is adopted.
skip_transpose : bool
If False, uses batch x spatial.dim2 x spatial.dim1 x channel convention of speechbrain.
If True, uses batch x channel x spatial.dim1 x spatial.dim2 convention.
weight_norm : bool
If True, use weight normalization,
to be removed with self.remove_weight_norm() at inference
Example
-------
>>> inp_tensor = torch.rand([10, 40, 16, 8])
>>> cnn_2d = Conv2d(
... input_shape=inp_tensor.shape, out_channels=5, kernel_size=(7, 3)
... )
>>> out_tensor = cnn_2d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 40, 16, 5])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=(1, 1),
dilation=(1, 1),
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
skip_transpose=False,
weight_norm=False,
conv_init=None,
):
super().__init__()
# handle the case if some parameter is int
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.unsqueeze = False
self.skip_transpose = skip_transpose
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input(input_shape)
self.in_channels = in_channels
# Weights are initialized following pytorch approach
self.conv = nn.Conv2d(
self.in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
padding=0,
dilation=self.dilation,
groups=groups,
bias=bias,
)
if conv_init == "kaiming":
nn.init.kaiming_normal_(self.conv.weight)
if weight_norm:
self.conv = nn.utils.weight_norm(self.conv)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
if self.unsqueeze:
x = x.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size[0] - 1) * self.dilation[1]
x = F.pad(x, (0, 0, num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same','valid' or 'causal'. Got "
+ self.padding
)
wx = self.conv(x)
if self.unsqueeze:
wx = wx.squeeze(1)
if not self.skip_transpose:
wx = wx.transpose(1, -1)
return wx
def _manage_padding(
self,
x,
kernel_size: Tuple[int, int],
dilation: Tuple[int, int],
stride: Tuple[int, int],
):
"""This function performs zero-padding on the time and frequency axes
        such that their lengths are unchanged after the convolution.
Arguments
---------
x : torch.Tensor
kernel_size : int
dilation : int
stride: int
"""
# Detecting input shape
L_in = self.in_channels
# Time padding
padding_time = get_padding_elem(
L_in, stride[-1], kernel_size[-1], dilation[-1]
)
padding_freq = get_padding_elem(
L_in, stride[-2], kernel_size[-2], dilation[-2]
)
padding = padding_time + padding_freq
# Applying padding
x = nn.functional.pad(x, padding, mode=self.padding_mode)
return x
def _check_input(self, shape):
"""Checks the input shape and returns the number of input channels."""
if len(shape) == 3:
self.unsqueeze = True
in_channels = 1
elif len(shape) == 4:
in_channels = shape[3]
else:
raise ValueError("Expected 3d or 4d inputs. Got " + len(shape))
# Kernel size must be odd
if not self.padding == "valid" and (
self.kernel_size[0] % 2 == 0 or self.kernel_size[1] % 2 == 0
):
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
return in_channels
def remove_weight_norm(self):
"""Removes weight normalization at inference if used during training."""
self.conv = nn.utils.remove_weight_norm(self.conv)
class Conv2dWithConstraint(Conv2d):
"""This function implements 2d convolution with kernel max-norm constaint.
This corresponds to set an upper bound for the kernel norm.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : tuple
Kernel size of the 2d convolutional filters over time and frequency
axis.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride: int
Stride factor of the 2d convolutional filters over time and frequency
axis.
dilation : int
Dilation factor of the 2d convolutional filters over time and
frequency axis.
padding : str
(same, valid). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
groups : int
This option specifies the convolutional groups. See torch.nn
documentation for more information.
bias : bool
If True, the additive bias b is adopted.
max_norm : float
kernel max-norm
Example
-------
>>> inp_tensor = torch.rand([10, 40, 16, 8])
>>> max_norm = 1
>>> cnn_2d_constrained = Conv2dWithConstraint(
... in_channels=inp_tensor.shape[-1], out_channels=5, kernel_size=(7, 3)
... )
>>> out_tensor = cnn_2d_constrained(inp_tensor)
>>> torch.any(torch.norm(cnn_2d_constrained.conv.weight.data, p=2, dim=0)>max_norm)
tensor(False)
"""
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(Conv2dWithConstraint, self).__init__(*args, **kwargs)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
self.conv.weight.data = torch.renorm(
self.conv.weight.data, p=2, dim=0, maxnorm=self.max_norm
)
return super(Conv2dWithConstraint, self).forward(x)
class ConvTranspose1d(nn.Module):
"""This class implements 1d transposed convolution with speechbrain.
Transpose convolution is normally used to perform upsampling.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
upsampling in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str or int
        To obtain the target dimension in output, we suggest tuning the kernel
        size and the padding properly. We also support the following options
        to have some control over the padding and the corresponding output
        dimensionality.
        if "valid", no padding is applied
        if "same", the padding amount is inferred so that the output size is as
        close as possible to the input size. Note that for some kernel_size / stride
        combinations it is not possible to obtain exactly the same size, but we
        return the closest possible size.
        if "factor", the padding amount is inferred so that the output size is as
        close as possible to input_size * stride. Note that for some kernel_size /
        stride combinations it is not possible to obtain the exact size, but we
        return the closest possible size.
        if an integer value is entered, a custom padding is used.
output_padding : int,
Additional size added to one side of the output shape
groups: int
Number of blocked connections from input channels to output channels.
Default: 1
bias: bool
If True, adds a learnable bias to the output
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
weight_norm : bool
If True, use weight normalization,
to be removed with self.remove_weight_norm() at inference
Example
-------
>>> from speechbrain.nnet.CNN import Conv1d, ConvTranspose1d
>>> inp_tensor = torch.rand([10, 12, 40]) #[batch, time, fea]
>>> convtranspose_1d = ConvTranspose1d(
... input_shape=inp_tensor.shape, out_channels=8, kernel_size=3, stride=2
... )
>>> out_tensor = convtranspose_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 25, 8])
>>> # Combination of Conv1d and ConvTranspose1d
>>> from speechbrain.nnet.CNN import Conv1d, ConvTranspose1d
>>> signal = torch.tensor([1,100])
>>> signal = torch.rand([1,100]) #[batch, time]
>>> conv1d = Conv1d(input_shape=signal.shape, out_channels=1, kernel_size=3, stride=2)
>>> conv_out = conv1d(signal)
>>> conv_t = ConvTranspose1d(input_shape=conv_out.shape, out_channels=1, kernel_size=3, stride=2, padding=1)
>>> signal_rec = conv_t(conv_out, output_size=[100])
>>> signal_rec.shape
torch.Size([1, 100])
>>> signal = torch.rand([1,115]) #[batch, time]
>>> conv_t = ConvTranspose1d(input_shape=signal.shape, out_channels=1, kernel_size=3, stride=2, padding='same')
>>> signal_rec = conv_t(signal)
>>> signal_rec.shape
torch.Size([1, 115])
>>> signal = torch.rand([1,115]) #[batch, time]
>>> conv_t = ConvTranspose1d(input_shape=signal.shape, out_channels=1, kernel_size=7, stride=2, padding='valid')
>>> signal_rec = conv_t(signal)
>>> signal_rec.shape
torch.Size([1, 235])
>>> signal = torch.rand([1,115]) #[batch, time]
>>> conv_t = ConvTranspose1d(input_shape=signal.shape, out_channels=1, kernel_size=7, stride=2, padding='factor')
>>> signal_rec = conv_t(signal)
>>> signal_rec.shape
torch.Size([1, 231])
>>> signal = torch.rand([1,115]) #[batch, time]
>>> conv_t = ConvTranspose1d(input_shape=signal.shape, out_channels=1, kernel_size=3, stride=2, padding=10)
>>> signal_rec = conv_t(signal)
>>> signal_rec.shape
torch.Size([1, 211])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=1,
dilation=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
skip_transpose=False,
weight_norm=False,
):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.unsqueeze = False
self.skip_transpose = skip_transpose
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input_shape(input_shape)
if self.padding == "same":
L_in = input_shape[-1] if skip_transpose else input_shape[1]
padding_value = get_padding_elem_transposed(
L_in,
L_in,
stride=stride,
kernel_size=kernel_size,
dilation=dilation,
output_padding=output_padding,
)
elif self.padding == "factor":
L_in = input_shape[-1] if skip_transpose else input_shape[1]
padding_value = get_padding_elem_transposed(
L_in * stride,
L_in,
stride=stride,
kernel_size=kernel_size,
dilation=dilation,
output_padding=output_padding,
)
elif self.padding == "valid":
padding_value = 0
elif type(self.padding) is int:
padding_value = padding
else:
raise ValueError("Not supported padding type")
self.conv = nn.ConvTranspose1d(
in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
dilation=self.dilation,
padding=padding_value,
groups=groups,
bias=bias,
)
if weight_norm:
self.conv = nn.utils.weight_norm(self.conv)
def forward(self, x, output_size=None):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
if self.unsqueeze:
x = x.unsqueeze(1)
wx = self.conv(x, output_size=output_size)
if self.unsqueeze:
wx = wx.squeeze(1)
if not self.skip_transpose:
wx = wx.transpose(1, -1)
return wx
def _check_input_shape(self, shape):
"""Checks the input shape and returns the number of input channels."""
if len(shape) == 2:
self.unsqueeze = True
in_channels = 1
elif self.skip_transpose:
in_channels = shape[1]
elif len(shape) == 3:
in_channels = shape[2]
else:
raise ValueError(
"conv1d expects 2d, 3d inputs. Got " + str(len(shape))
)
return in_channels
def remove_weight_norm(self):
"""Removes weight normalization at inference if used during training."""
self.conv = nn.utils.remove_weight_norm(self.conv)
class DepthwiseSeparableConv1d(nn.Module):
"""This class implements the depthwise separable 1d convolution.
    First, a channel-wise convolution is applied to the input.
    Then, a point-wise convolution projects the result to the output channels.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
input_shape : tuple
Expected shape of the input.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
"causal" results in causal (dilated) convolutions.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
bias : bool
If True, the additive bias b is adopted.
Example
-------
>>> inp = torch.randn([8, 120, 40])
>>> conv = DepthwiseSeparableConv1d(256, 3, input_shape=inp.shape)
>>> out = conv(inp)
>>> out.shape
torch.Size([8, 120, 256])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=1,
dilation=1,
padding="same",
bias=True,
):
super().__init__()
assert len(input_shape) == 3, "input must be a 3d tensor"
bz, time, chn = input_shape
self.depthwise = Conv1d(
chn,
kernel_size,
input_shape=input_shape,
stride=stride,
dilation=dilation,
padding=padding,
groups=chn,
bias=bias,
)
self.pointwise = Conv1d(
out_channels, kernel_size=1, input_shape=input_shape,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 3d tensors are expected.
"""
return self.pointwise(self.depthwise(x))
class DepthwiseSeparableConv2d(nn.Module):
"""This class implements the depthwise separable 2d convolution.
    First, a channel-wise convolution is applied to the input.
    Then, a point-wise convolution projects the result to the output channels.
Arguments
---------
    out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
"causal" results in causal (dilated) convolutions.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
bias : bool
If True, the additive bias b is adopted.
Example
-------
>>> inp = torch.randn([8, 120, 40, 1])
>>> conv = DepthwiseSeparableConv2d(256, (3, 3), input_shape=inp.shape)
>>> out = conv(inp)
>>> out.shape
torch.Size([8, 120, 40, 256])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=(1, 1),
dilation=(1, 1),
padding="same",
bias=True,
):
super().__init__()
# handle the case if some parameter is int
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation)
assert len(input_shape) in {3, 4}, "input must be a 3d or 4d tensor"
self.unsqueeze = len(input_shape) == 3
bz, time, chn1, chn2 = input_shape
self.depthwise = Conv2d(
chn2,
kernel_size,
input_shape=input_shape,
stride=stride,
dilation=dilation,
padding=padding,
groups=chn2,
bias=bias,
)
self.pointwise = Conv2d(
out_channels, kernel_size=(1, 1), input_shape=input_shape,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 3d tensors are expected.
"""
if self.unsqueeze:
x = x.unsqueeze(1)
out = self.pointwise(self.depthwise(x))
if self.unsqueeze:
out = out.squeeze(1)
return out
class GaborConv1d(nn.Module):
"""
This class implements 1D Gabor Convolutions from
Neil Zeghidour, Olivier Teboul, F{\'e}lix de Chaumont Quitry & Marco Tagliasacchi, "LEAF: A LEARNABLE FRONTEND
FOR AUDIO CLASSIFICATION", in Proc. of ICLR 2021 (https://arxiv.org/abs/2101.08596)
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size: int
Kernel size of the convolutional filters.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
padding : str
(same, valid). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
sample_rate : int,
        Sampling rate of the input signals. It is used to initialize the
        Gabor filters on the mel scale.
min_freq : float
Lowest possible frequency (in Hz) for a filter
max_freq : float
Highest possible frequency (in Hz) for a filter
n_fft: int
number of FFT bins for initialization
normalize_energy: bool
whether to normalize energy at initialization. Default is False
bias : bool
If True, the additive bias b is adopted.
sort_filters: bool
whether to sort filters by center frequencies. Default is False
use_legacy_complex: bool
If False, torch.complex64 data type is used for gabor impulse responses
If True, computation is performed on two real-valued tensors
skip_transpose: bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> inp_tensor = torch.rand([10, 8000])
    >>> # 401 corresponds to a window of 25 ms at a 16 kHz sampling rate
>>> gabor_conv = GaborConv1d(
... 40, kernel_size=401, stride=1, in_channels=1
... )
>>> out_tensor = gabor_conv(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 8000, 40])
"""
def __init__(
self,
out_channels,
kernel_size,
stride,
input_shape=None,
in_channels=None,
padding="same",
padding_mode="constant",
sample_rate=16000,
min_freq=60.0,
max_freq=None,
n_fft=512,
normalize_energy=False,
bias=False,
sort_filters=False,
use_legacy_complex=False,
skip_transpose=False,
):
super(GaborConv1d, self).__init__()
self.filters = out_channels // 2
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.padding_mode = padding_mode
self.sort_filters = sort_filters
self.sample_rate = sample_rate
self.min_freq = min_freq
if max_freq is None:
max_freq = sample_rate / 2
self.max_freq = max_freq
self.n_fft = n_fft
self.normalize_energy = normalize_energy
self.use_legacy_complex = use_legacy_complex
self.skip_transpose = skip_transpose
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input_shape(input_shape)
self.kernel = nn.Parameter(self._initialize_kernel())
if bias:
self.bias = torch.nn.Parameter(torch.ones(self.filters * 2,))
else:
self.bias = None
def forward(self, x):
"""Returns the output of the Gabor convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
unsqueeze = x.ndim == 2
if unsqueeze:
x = x.unsqueeze(1)
kernel = self._gabor_constraint(self.kernel)
if self.sort_filters:
idxs = torch.argsort(kernel[:, 0])
kernel = kernel[idxs, :]
filters = self._gabor_filters(kernel)
if not self.use_legacy_complex:
temp = torch.view_as_real(filters)
real_filters = temp[:, :, 0]
img_filters = temp[:, :, 1]
else:
real_filters = filters[:, :, 0]
img_filters = filters[:, :, 1]
stacked_filters = torch.cat(
[real_filters.unsqueeze(1), img_filters.unsqueeze(1)], dim=1
)
stacked_filters = torch.reshape(
stacked_filters, (2 * self.filters, self.kernel_size)
)
stacked_filters = stacked_filters.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(x, self.kernel_size)
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same' or 'valid'. Got " + self.padding
)
output = F.conv1d(
x, stacked_filters, bias=self.bias, stride=self.stride, padding=0
)
if not self.skip_transpose:
output = output.transpose(1, -1)
return output
def _gabor_constraint(self, kernel_data):
mu_lower = 0.0
mu_upper = math.pi
sigma_lower = (
4
* torch.sqrt(
2.0 * torch.log(torch.tensor(2.0, device=kernel_data.device))
)
/ math.pi
)
sigma_upper = (
self.kernel_size
* torch.sqrt(
2.0 * torch.log(torch.tensor(2.0, device=kernel_data.device))
)
/ math.pi
)
clipped_mu = torch.clamp(
kernel_data[:, 0], mu_lower, mu_upper
).unsqueeze(1)
clipped_sigma = torch.clamp(
kernel_data[:, 1], sigma_lower, sigma_upper
).unsqueeze(1)
return torch.cat([clipped_mu, clipped_sigma], dim=-1)
def _gabor_filters(self, kernel):
t = torch.arange(
-(self.kernel_size // 2),
(self.kernel_size + 1) // 2,
dtype=kernel.dtype,
device=kernel.device,
)
if not self.use_legacy_complex:
return gabor_impulse_response(
t, center=kernel[:, 0], fwhm=kernel[:, 1]
)
else:
return gabor_impulse_response_legacy_complex(
t, center=kernel[:, 0], fwhm=kernel[:, 1]
)
def _manage_padding(self, x, kernel_size):
# this is the logic that gives correct shape that complies
# with the original implementation at https://github.com/google-research/leaf-audio
def get_padding_value(kernel_size):
"""Gets the number of elements to pad."""
kernel_sizes = (kernel_size,)
from functools import reduce
from operator import __add__
conv_padding = reduce(
__add__,
[
(k // 2 + (k - 2 * (k // 2)) - 1, k // 2)
for k in kernel_sizes[::-1]
],
)
return conv_padding
pad_value = get_padding_value(kernel_size)
x = F.pad(x, pad_value, mode=self.padding_mode, value=0)
return x
def _mel_filters(self):
def _mel_filters_areas(filters):
peaks, _ = torch.max(filters, dim=1, keepdim=True)
return (
peaks
* (torch.sum((filters > 0).float(), dim=1, keepdim=True) + 2)
* np.pi
/ self.n_fft
)
mel_filters = torchaudio.functional.melscale_fbanks(
n_freqs=self.n_fft // 2 + 1,
f_min=self.min_freq,
f_max=self.max_freq,
n_mels=self.filters,
sample_rate=self.sample_rate,
)
mel_filters = mel_filters.transpose(1, 0)
if self.normalize_energy:
mel_filters = mel_filters / _mel_filters_areas(mel_filters)
return mel_filters
def _gabor_params_from_mels(self):
coeff = torch.sqrt(2.0 * torch.log(torch.tensor(2.0))) * self.n_fft
sqrt_filters = torch.sqrt(self._mel_filters())
center_frequencies = torch.argmax(sqrt_filters, dim=1)
peaks, _ = torch.max(sqrt_filters, dim=1, keepdim=True)
half_magnitudes = peaks / 2.0
fwhms = torch.sum((sqrt_filters >= half_magnitudes).float(), dim=1)
output = torch.cat(
[
(center_frequencies * 2 * np.pi / self.n_fft).unsqueeze(1),
(coeff / (np.pi * fwhms)).unsqueeze(1),
],
dim=-1,
)
return output
def _initialize_kernel(self):
return self._gabor_params_from_mels()
def _check_input_shape(self, shape):
"""Checks the input shape and returns the number of input channels."""
if len(shape) == 2:
in_channels = 1
elif len(shape) == 3:
in_channels = 1
else:
raise ValueError(
"GaborConv1d expects 2d or 3d inputs. Got " + str(len(shape))
)
# Kernel size must be odd
if self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
return in_channels
def get_padding_elem(L_in: int, stride: int, kernel_size: int, dilation: int):
"""This function computes the number of elements to add for zero-padding.
Arguments
---------
L_in : int
stride: int
kernel_size : int
dilation : int
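    Example
    -------
    A minimal illustrative sketch (the values below are arbitrary and are
    not part of the original documentation):
    >>> get_padding_elem(L_in=100, stride=1, kernel_size=3, dilation=1)
    [1, 1]
    >>> get_padding_elem(L_in=100, stride=2, kernel_size=5, dilation=1)
    [2, 2]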
"""
if stride > 1:
padding = [math.floor(kernel_size / 2), math.floor(kernel_size / 2)]
else:
L_out = (
math.floor((L_in - dilation * (kernel_size - 1) - 1) / stride) + 1
)
padding = [
math.floor((L_in - L_out) / 2),
math.floor((L_in - L_out) / 2),
]
return padding
def get_padding_elem_transposed(
L_out: int,
L_in: int,
stride: int,
kernel_size: int,
dilation: int,
output_padding: int,
):
"""This function computes the required padding size for transposed convolution
Arguments
---------
L_out : int
L_in : int
stride: int
kernel_size : int
dilation : int
output_padding : int
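    Example
    -------
    A minimal illustrative sketch (the values below are arbitrary and are
    not part of the original documentation):
    >>> get_padding_elem_transposed(
    ...     L_out=100, L_in=100, stride=1, kernel_size=3, dilation=1, output_padding=0
    ... )
    1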
"""
padding = -0.5 * (
L_out
- (L_in - 1) * stride
- dilation * (kernel_size - 1)
- output_padding
- 1
)
return int(padding)
| 47,329 | 30.261559 | 117 | py |
speechbrain | speechbrain-main/speechbrain/nnet/schedulers.py | """
Schedulers for updating hyperparameters (such as learning rate).
Authors
* Mirco Ravanelli 2020
* Peter Plantinga 2020
* Loren Lugosch 2020
"""
import math
import torch
import logging
from speechbrain.utils import checkpoints
logger = logging.getLogger(__name__)
def update_learning_rate(optimizer, new_lr, param_group=None):
"""Change the learning rate value within an optimizer.
Arguments
---------
optimizer : torch.optim object
Updates the learning rate for this optimizer.
new_lr : float
The new value to use for the learning rate.
param_group : list of int
The param group indices to update. If not provided, all groups updated.
Example
-------
>>> from torch.optim import SGD
>>> from speechbrain.nnet.linear import Linear
>>> model = Linear(n_neurons=10, input_size=10)
>>> optimizer = SGD(model.parameters(), lr=0.1)
>>> update_learning_rate(optimizer, 0.2)
>>> optimizer.param_groups[0]["lr"]
0.2
"""
# Iterate all groups if none is provided
if param_group is None:
groups = range(len(optimizer.param_groups))
else:
groups = param_group
for i in groups:
old_lr = optimizer.param_groups[i]["lr"]
# Change learning rate if new value is different from old.
if new_lr != old_lr:
optimizer.param_groups[i]["lr"] = new_lr
optimizer.param_groups[i]["prev_lr"] = old_lr
logger.info("Changing lr from %.2g to %.2g" % (old_lr, new_lr))
@checkpoints.register_checkpoint_hooks
class NewBobScheduler:
"""Scheduler with new-bob technique, used for LR annealing.
The learning rate is annealed based on the validation performance.
    In particular: if (past_loss - current_loss) / past_loss < impr_threshold:
    lr = lr * annealing_factor.
Arguments
---------
initial_value : float
The initial hyperparameter value.
annealing_factor : float
        It is the annealing factor used in the new-bob strategy.
improvement_threshold : float
        It is the improvement rate between losses used to perform learning
        rate annealing in the new-bob strategy.
patient : int
When the annealing condition is violated patient times,
the learning rate is finally reduced.
Example
-------
>>> scheduler = NewBobScheduler(initial_value=1.0)
>>> scheduler(metric_value=10.0)
(1.0, 1.0)
>>> scheduler(metric_value=2.0)
(1.0, 1.0)
>>> scheduler(metric_value=2.5)
(1.0, 0.5)
"""
def __init__(
self,
initial_value,
annealing_factor=0.5,
improvement_threshold=0.0025,
patient=0,
):
self.hyperparam_value = initial_value
self.annealing_factor = annealing_factor
self.improvement_threshold = improvement_threshold
self.patient = patient
self.metric_values = []
self.current_patient = self.patient
def __call__(self, metric_value):
"""Returns the current and new value for the hyperparameter.
Arguments
---------
        metric_value : float
A number for determining whether to change the hyperparameter value.
"""
old_value = new_value = self.hyperparam_value
if len(self.metric_values) > 0:
prev_metric = self.metric_values[-1]
# Update value if improvement too small and patience is 0
if prev_metric == 0: # Prevent division by zero
improvement = 0
else:
improvement = (prev_metric - metric_value) / prev_metric
if improvement < self.improvement_threshold:
if self.current_patient == 0:
new_value *= self.annealing_factor
self.current_patient = self.patient
else:
self.current_patient -= 1
# Store relevant info
self.metric_values.append(metric_value)
self.hyperparam_value = new_value
return old_value, new_value
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {
"hyperparam_value": self.hyperparam_value,
"metric_values": self.metric_values,
"current_patient": self.current_patient,
}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device # Unused in here
data = torch.load(path)
self.hyperparam_value = data["hyperparam_value"]
self.metric_values = data["metric_values"]
self.current_patient = data["current_patient"]
class LinearScheduler:
"""Scheduler with linear annealing technique.
The learning rate linearly decays over the specified number of epochs.
Arguments
---------
initial_value : float
The value upon initialization.
final_value : float
The value used when the epoch count reaches ``epoch_count - 1``.
epoch_count : int
Number of epochs.
Example
-------
>>> scheduler = LinearScheduler(1.0, 0.0, 4)
>>> scheduler(current_epoch=1)
(1.0, 0.666...)
>>> scheduler(current_epoch=2)
(0.666..., 0.333...)
>>> scheduler(current_epoch=3)
(0.333..., 0.0)
>>> scheduler(current_epoch=4)
(0.0, 0.0)
"""
def __init__(self, initial_value, final_value, epoch_count):
self.value_at_epoch = torch.linspace(
initial_value, final_value, steps=epoch_count
).tolist()
def __call__(self, current_epoch):
"""Returns the current and new value for the hyperparameter.
Arguments
---------
current_epoch : int
Number of times the dataset has been iterated.
"""
old_index = max(0, current_epoch - 1)
index = min(current_epoch, len(self.value_at_epoch) - 1)
return self.value_at_epoch[old_index], self.value_at_epoch[index]
@checkpoints.register_checkpoint_hooks
class LinearWarmupScheduler:
"""Create a schedule with a learning rate that decreases linearly
from the initial lr set in the optimizer to 0, after
a warmup period during which it increases linearly
from 0 to the initial lr set in the optimizer.
* Ge Li 2022
Arguments
---------
initial_value : float
The value upon initialization (lr0).
num_warmup_steps : int
Number of warmup steps. The learning rate reaches lr0 at
``num_warmup_steps + 1`` step.
num_training_steps: int
The total number of training steps.
Example
-------
>>> scheduler = LinearWarmupScheduler(1.0, 2, 4)
>>> scheduler.get_next_value()
0.0
>>> scheduler.get_next_value()
0.5
>>> scheduler.get_next_value()
1.0
>>> scheduler.get_next_value()
0.5
>>> scheduler.get_next_value()
0.0
"""
def __init__(self, initial_value, num_warmup_steps, num_training_steps):
self.lr0 = initial_value
self.num_warmup_steps = num_warmup_steps
self.num_training_steps = num_training_steps
self.current_step = 0
def calculate_lr(self, current_step):
"""Returns the current and new value for the hyperparameter.
Arguments
---------
current_step : int
Number of steps the model has been updated.
"""
if current_step < self.num_warmup_steps:
return (
float(current_step)
/ float(max(1, self.num_warmup_steps))
* self.lr0
)
return self.lr0 * max(
0.0,
float(self.num_training_steps - current_step)
/ float(max(1, self.num_training_steps - self.num_warmup_steps)),
)
def get_next_value(self):
"""Returns the next learning rate value for the hyperparameter.
"""
new_value = self.calculate_lr(self.current_step)
self.current_step += 1
return new_value
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {
"initial_value": self.lr0,
"num_warmup_steps": self.num_warmup_steps,
"num_training_steps": self.num_training_steps,
"current_step": self.current_step,
}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device # Unused in here
data = torch.load(path)
self.lr0 = data["initial_value"]
self.num_warmup_steps = data["num_warmup_steps"]
self.num_training_steps = data["num_training_steps"]
self.current_step = data["current_step"]
class StepScheduler:
"""Learning rate scheduler with step annealing technique.
    The hyperparameter's value decays over the epochs with the
    selected ``decay_factor``.
``value = init_value * decay_factor ^ floor((1 + epoch) / decay_drop)``
Arguments
---------
initial_value : float
Initial value for the hyperparameter being updated.
decay_factor : float
Factor multiplied with the initial_value
decay_drop : float
Annealing factor (the decay of the hyperparameter value is faster
with higher ``decay_drop`` values).
half_life: int
A convenience parameter to set decay_factor such that the parameter
will drop to half its value at the specified epoch. May not
be used together with decay_factor or decay_drop
Example
-------
>>> scheduler = StepScheduler(initial_value=1.0)
>>> scheduler(current_epoch=1)
(1.0, 0.5)
>>> scheduler(current_epoch=2)
(0.5, 0.5)
>>> scheduler(current_epoch=3)
(0.5, 0.25)
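    With ``half_life``, the value reaches half of its initial value around
    the given epoch (an illustrative sketch, not part of the original
    docstring):
    >>> scheduler = StepScheduler(initial_value=1.0, half_life=10)
    >>> prev, new = scheduler(current_epoch=10)
    >>> round(prev, 4)
    0.5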
"""
DEFAULT_DECAY_FACTOR = 0.5
DEFAULT_DECAY_DROP = 2
def __init__(
self, initial_value, decay_factor=None, decay_drop=None, half_life=None
):
self.initial_value = initial_value
if half_life:
if decay_factor or decay_drop:
raise ValueError(
"half_life cannot be used together with decay_factor and decay_drop"
)
self.decay_factor = self._compute_half_life_decay_factor(half_life)
self.decay_drop = 1.0
else:
self.decay_factor = decay_factor or self.DEFAULT_DECAY_FACTOR
self.decay_drop = decay_drop or self.DEFAULT_DECAY_DROP
def _compute_half_life_decay_factor(self, half_life):
return math.exp(-math.log(2) / half_life)
def __call__(self, current_epoch):
"""Returns current and new hyperparameter value.
Arguments
---------
current_epoch : int
Number of times the dataset has been iterated.
"""
current_value = self._compute_value(current_epoch - 1)
next_value = self._compute_value(current_epoch)
return current_value, next_value
def _compute_value(self, current_epoch):
return self.initial_value * math.pow(
self.decay_factor,
math.floor((1 + current_epoch) / self.decay_drop),
)
@checkpoints.register_checkpoint_hooks
class NoamScheduler:
"""The is an implementation of the transformer's learning rate scheduler with warmup.
Reference: https://arxiv.org/abs/1706.03762
Note: this scheduler anneals the lr at each update of the model's weight,
and n_steps must be saved for restarting.
Arguments
---------
lr_initial : float
Initial learning rate (i.e. the lr used at epoch 0).
n_warmup_steps : int
        Number of warm-up steps.
model_size : int
size of transformer embed_dim. It is used to scale the maximum learning rate value reached
by the scheduler. It is divided by model_size ** (0.5).
If not specified the maximum learning rate value is instead multiplied by warmup_steps ** (0.5).
Example
-------
>>> from speechbrain.nnet.linear import Linear
>>> inp_tensor = torch.rand([1,660,3])
>>> model = Linear(input_size=3, n_neurons=4)
>>> optim = torch.optim.Adam(model.parameters(), lr=1)
>>> output = model(inp_tensor)
>>> scheduler =NoamScheduler(optim.param_groups[0]["lr"], 3)
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
0.3333333333333333
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
0.6666666666666666
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
0.9999999999999999
"""
def __init__(self, lr_initial, n_warmup_steps, model_size=None):
self.lr_initial = lr_initial
self.n_warmup_steps = n_warmup_steps
self.current_lr = lr_initial
self.losses = []
self.n_steps = 0
self.normalize = n_warmup_steps ** 0.5
if model_size is not None:
self.normalize = model_size ** (-0.5)
def __call__(self, opt):
"""
Arguments
---------
opt : optimizer
The optimizer to update using this scheduler.
Returns
-------
current_lr : float
The learning rate before the update.
lr : float
The learning rate after the update.
"""
self.n_steps += 1
current_lr = opt.param_groups[0]["lr"]
lr = self.lr_initial * self._get_lr_scale()
# Changing the learning rate within the optimizer
for param_group in opt.param_groups:
param_group["lr"] = lr
self.current_lr = current_lr
return current_lr, lr
def _get_lr_scale(self):
n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps
return self.normalize * min(
n_steps ** (-0.5), n_steps * n_warmup_steps ** (-1.5)
)
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {"losses": self.losses, "n_steps": self.n_steps}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device
data = torch.load(path)
self.losses = data["losses"]
self.n_steps = data["n_steps"]
@checkpoints.register_checkpoint_hooks
class CyclicCosineScheduler:
"""The is an implementation of the Cyclic-Cosine learning rate scheduler with warmup.
Reference: https://openreview.net/pdf?id=BJYwwY9ll
Note: this scheduler anneals the lr at each update of the model's weight,
and n_steps must be saved for restarting.
Arguments
---------
lr_initial : float
Initial learning rate (i.e. the lr used at epoch 0).
n_warmup_steps : int
Number of warm up steps.
total_steps : int
Total number of updating steps.
Example
-------
>>> from speechbrain.nnet.linear import Linear
>>> inp_tensor = torch.rand([1,660,3])
>>> model = Linear(input_size=3, n_neurons=4)
>>> optim = torch.optim.Adam(model.parameters(), lr=1)
>>> output = model(inp_tensor)
>>> scheduler =CyclicCosineScheduler(3, optim.param_groups[0]["lr"])
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
0.9999999990130395
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
0.9999999997532598
>>> curr_lr,next_lr=scheduler(optim)
>>> optim.param_groups[0]["lr"]
1.0
"""
def __init__(self, n_warmup_steps, lr_initial=None, total_steps=100000):
self.n_warmup_steps = n_warmup_steps
self.losses = []
self.initial_lr = lr_initial
self.current_lr = lr_initial
self.total = total_steps
self.n_steps = 0
self.normalize = 1 / (n_warmup_steps * n_warmup_steps ** -1.5)
def __call__(self, opt):
"""
Arguments
---------
        opt : optimizer
            The optimizer to update using this scheduler.
Returns
-------
current_lr : float
The learning rate before the update.
lr : float
The learning rate after the update.
"""
self.n_steps += 1
if self.initial_lr is None:
current_lr = opt.param_groups[0]["lr"]
else:
current_lr = self.current_lr
lr = current_lr * self._get_lr_scale()
# Changing the learning rate within the optimizer
for param_group in opt.param_groups:
param_group["lr"] = lr
self.current_lr = current_lr
return current_lr, lr
def _get_lr_scale(self):
n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps
return 0.5 * (
math.cos(math.pi * (n_steps - n_warmup_steps) / self.total) + 1
)
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the curent metrics on the specified path."""
data = {"losses": self.losses, "n_steps": self.n_steps}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device # Unused here
data = torch.load(path)
self.losses = data["losses"]
self.n_steps = data["n_steps"]
@checkpoints.register_checkpoint_hooks
class ReduceLROnPlateau:
"""Learning rate scheduler which decreases the learning rate if the loss
function of interest gets stuck on a plateau, or starts to increase.
    The difference from NewBobScheduler is that this one keeps a memory of
    the last step where no improvement was observed, and compares against
    that particular loss value as opposed to the most recent loss.
Arguments
---------
lr_min : float
The minimum allowable learning rate.
factor : float
Factor with which to reduce the learning rate.
patience : int
How many epochs to wait before reducing the learning rate.
Example
-------
>>> from torch.optim import Adam
>>> from speechbrain.nnet.linear import Linear
>>> inp_tensor = torch.rand([1,660,3])
>>> model = Linear(n_neurons=10, input_size=3)
>>> optim = Adam(lr=1.0, params=model.parameters())
>>> output = model(inp_tensor)
>>> scheduler = ReduceLROnPlateau(0.25, 0.5, 2, 1)
>>> curr_lr,next_lr=scheduler([optim],current_epoch=1, current_loss=10.0)
>>> curr_lr,next_lr=scheduler([optim],current_epoch=2, current_loss=11.0)
>>> curr_lr,next_lr=scheduler([optim],current_epoch=3, current_loss=13.0)
>>> curr_lr,next_lr=scheduler([optim],current_epoch=4, current_loss=14.0)
>>> next_lr
0.5
"""
def __init__(
self, lr_min=1e-8, factor=0.5, patience=2, dont_halve_until_epoch=65
):
self.lr_min = lr_min
self.factor = factor
self.patience = patience
self.patience_counter = 0
self.losses = []
self.dont_halve_until_epoch = dont_halve_until_epoch
self.anchor = 99999
def __call__(self, optim_list, current_epoch, current_loss):
"""
Arguments
---------
optim_list : list of optimizers
The optimizers to update using this scheduler.
current_epoch : int
Number of times the dataset has been iterated.
current_loss : int
A number for determining whether to change the learning rate.
Returns
-------
current_lr : float
The learning rate before the update.
next_lr : float
The learning rate after the update.
"""
for opt in optim_list:
current_lr = opt.param_groups[0]["lr"]
if current_epoch <= self.dont_halve_until_epoch:
next_lr = current_lr
self.anchor = current_loss
else:
if current_loss <= self.anchor:
self.patience_counter = 0
next_lr = current_lr
self.anchor = current_loss
elif (
current_loss > self.anchor
and self.patience_counter < self.patience
):
self.patience_counter = self.patience_counter + 1
next_lr = current_lr
else:
next_lr = current_lr * self.factor
self.patience_counter = 0
# impose the lower bound
next_lr = max(next_lr, self.lr_min)
# Updating current loss
self.losses.append(current_loss)
return current_lr, next_lr
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the curent metrics on the specified path."""
data = {
"losses": self.losses,
"anchor": self.anchor,
"patience_counter": self.patience_counter,
}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device # Not used
data = torch.load(path)
self.losses = data["losses"]
self.anchor = data["anchor"]
self.patience_counter = data["patience_counter"]
@checkpoints.register_checkpoint_hooks
class CyclicLRScheduler:
"""This implements a cyclical learning rate policy (CLR).
The method cycles the learning rate between two boundaries with
some constant frequency, as detailed in this paper (https://arxiv.org/abs/1506.01186).
The amplitude of the cycle can be scaled on a per-iteration or
per-cycle basis.
This class has three built-in policies, as put forth in the paper.
"triangular":
A basic triangular cycle w/ no amplitude scaling.
"triangular2":
A basic triangular cycle that scales initial amplitude by half each cycle.
"exp_range":
A cycle that scales initial amplitude by gamma**(cycle iterations) at each
cycle iteration.
For more detail, please see the reference paper.
Arguments
---------
base_lr : float
initial learning rate which is the
lower boundary in the cycle.
max_lr : float
upper boundary in the cycle. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size : int
number of training iterations per
        half cycle. The authors suggest setting step_size to
        2-8 x the number of training iterations per epoch.
mode : str
one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
gamma : float
constant in 'exp_range' scaling function:
gamma**(cycle iterations)
scale_fn : lambda function
Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
        The mode parameter is ignored when scale_fn is provided.
scale_mode : str
{'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle). Default is 'cycle'.
Example
-------
>>> from speechbrain.nnet.linear import Linear
>>> inp_tensor = torch.rand([1,660,3])
>>> model = Linear(input_size=3, n_neurons=4)
>>> optim = torch.optim.Adam(model.parameters(), lr=1)
>>> output = model(inp_tensor)
>>> scheduler = CyclicLRScheduler(base_lr=0.1, max_lr=0.3, step_size=2)
>>> scheduler.on_batch_end(optim)
>>> optim.param_groups[0]["lr"]
0.2
>>> scheduler.on_batch_end(optim)
>>> optim.param_groups[0]["lr"]
0.3
>>> scheduler.on_batch_end(optim)
>>> optim.param_groups[0]["lr"]
0.2
"""
def __init__(
self,
base_lr=0.001,
max_lr=0.006,
step_size=2000.0,
mode="triangular",
gamma=1.0,
scale_fn=None,
scale_mode="cycle",
):
super(CyclicLRScheduler, self).__init__()
self.losses = []
self.base_lr = base_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == "triangular":
self.scale_fn = lambda x: 1.0
self.scale_mode = "cycle"
elif self.mode == "triangular2":
self.scale_fn = lambda x: 1 / (2.0 ** (x - 1))
self.scale_mode = "cycle"
elif self.mode == "exp_range":
self.scale_fn = lambda x: gamma ** (x)
self.scale_mode = "iterations"
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.clr_iterations = 0.0
self._reset()
def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
"""Resets cycle iterations.
Optional boundary/step size adjustment.
"""
if new_base_lr is not None:
self.base_lr = new_base_lr
if new_max_lr is not None:
self.max_lr = new_max_lr
if new_step_size is not None:
self.step_size = new_step_size
self.clr_iterations = 0.0
    def __call__(self, epoch):
        """Returns the current and the next value of the learning rate.
        Arguments
        ---------
        epoch : int
            Number of times the dataset has been iterated.
        """
old_lr = self.current_lr
new_lr = self.clr(self.clr_iterations + 1)
return old_lr, new_lr
def clr(self, clr_iterations):
"""Clears interations."""
cycle = math.floor(1 + clr_iterations / (2 * self.step_size))
x = abs(clr_iterations / self.step_size - 2 * cycle + 1)
if self.scale_mode == "cycle":
return self.base_lr + (self.max_lr - self.base_lr) * max(
0, (1 - x)
) * self.scale_fn(cycle)
else:
return self.base_lr + (self.max_lr - self.base_lr) * max(
0, (1 - x)
) * self.scale_fn(clr_iterations)
def on_batch_end(self, opt):
"""
Arguments
---------
opt : optimizers
The optimizers to update using this scheduler.
"""
self.clr_iterations += 1
lr = self.clr(self.clr_iterations)
current_lr = opt.param_groups[0]["lr"]
# Changing the learning rate within the optimizer
for param_group in opt.param_groups:
param_group["lr"] = lr
self.current_lr = current_lr
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {"losses": self.losses, "clr_iterations": self.clr_iterations}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device
data = torch.load(path)
self.losses = data["losses"]
self.clr_iterations = data["clr_iterations"]
@checkpoints.register_checkpoint_hooks
class IntervalScheduler:
"""A simple scheduler implementation that sets the learning rate to
specific values after a specific number of steps has been reached.
Arguments
---------
intervals: list
a list of dictionaries: {"steps": <number of steps>, "lr": the learning rate}
'steps' indicates the global step count at which a given
rate will apply
Example
-------
>>> import torch
>>> from speechbrain.nnet.schedulers import IntervalScheduler
>>> from speechbrain.nnet.linear import Linear
>>> model = Linear(input_size=3, n_neurons=4)
>>> optim = torch.optim.Adam(model.parameters(), lr=1)
>>> scheduler = IntervalScheduler(
... intervals=[
... {"steps": 2, "lr": 0.01},
... {"steps": 5, "lr": 0.005},
... {"steps": 9, "lr": 0.001}
... ]
... )
>>> optim.param_groups[0]["lr"]
1
>>> for _ in range(10):
... pre, post = scheduler(optim)
... print(f"{pre} -> {post}")
1 -> 1
1 -> 0.01
0.01 -> 0.01
0.01 -> 0.01
0.01 -> 0.005
0.005 -> 0.005
0.005 -> 0.005
0.005 -> 0.005
0.005 -> 0.001
0.001 -> 0.001
"""
def __init__(self, intervals):
self.intervals = intervals
self.n_steps = 0
self.losses = []
self._compute_next()
def __call__(self, opt):
"""
Arguments
---------
opt : optimizer
The optimizer to update using this scheduler.
Returns
-------
current_lr : float
The learning rate before the update.
lr : float
The learning rate after the update.
"""
self.n_steps += 1
current_lr = opt.param_groups[0]["lr"]
lr = self._get_lr(current_lr)
# Changing the learning rate within the optimizer
for param_group in opt.param_groups:
param_group["lr"] = lr
self.current_lr = current_lr
return current_lr, lr
def _compute_next(self):
self._next_intervals = [
interval
for interval in self.intervals
if interval["steps"] > self.n_steps
]
def _get_lr(self, current_lr):
lr = current_lr
if self._next_intervals:
next_interval = self._next_intervals[0]
if self.n_steps >= next_interval["steps"]:
lr = next_interval["lr"]
del self._next_intervals[0]
return lr
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {"losses": self.losses, "n_steps": self.n_steps}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch # Unused in this class
del device
data = torch.load(path)
self.losses = data["losses"]
self.n_steps = data["n_steps"]
self._compute_next()
@checkpoints.register_checkpoint_hooks
class InverseSquareRootScheduler:
"""The Inverse Square Root Scheduler, as defined in the T5 paper
https://arxiv.org/pdf/1910.10683.pdf
Arguments
---------
warmup_steps : int
The number of steps over which the learning rate will be constant
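    Example
    -------
    A minimal sketch (the model and optimizer below are placeholders used
    only for illustration):
    >>> from speechbrain.nnet.linear import Linear
    >>> model = Linear(input_size=3, n_neurons=4)
    >>> optim = torch.optim.Adam(model.parameters(), lr=1)
    >>> scheduler = InverseSquareRootScheduler(warmup_steps=4)
    >>> curr_lr, next_lr = scheduler(optim)
    >>> next_lr
    0.5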
"""
def __init__(self, warmup_steps):
self.warmup_steps = warmup_steps
self.n_steps = 0
def __call__(self, opt):
"""Returns current and new hyperparameter value.
Arguments
---------
current_epoch : int
Number of times the dataset has been iterated.
"""
self.n_steps += 1
current_lr = opt.param_groups[0]["lr"]
lr = self._compute_value()
# Changing the learning rate within the optimizer
for param_group in opt.param_groups:
param_group["lr"] = lr
self.current_lr = current_lr
return current_lr, lr
def _compute_value(self):
return 1 / math.sqrt(max(self.warmup_steps, self.n_steps))
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {"n_steps": self.n_steps}
torch.save(data, path)
@checkpoints.register_checkpoint_hooks
class WarmCoolDecayLRSchedule:
"""Warms up linearly, very slowly decays and cools down linearly again
    at the end of training. This is a three-step scheduler.
Reference
---------
Scaling Vision Transformers
arxiv.org/abs/2106.04560
Arguments
---------
lr : float
The max learning rate to reach after warmup.
warmup : int
Number of warmup steps (following a linear increase).
cooldown : int
Number of cooldown steps (following a linear decrease).
total_steps : int
Total number of steps (used to decay).
decay_factor : float
Decay factor applied every decay_every steps.
decay_every : int
Apply the decay factor to the learning rate every decay_every steps.
Example
-------
>>> from speechbrain.nnet.linear import Linear
>>> inp_tensor = torch.rand([1,660,3])
>>> model = Linear(input_size=3, n_neurons=4)
>>> optim = torch.optim.Adam(model.parameters(), lr=1)
>>> output = model(inp_tensor)
>>> scheduler = WarmCoolDecayLRSchedule(lr=1, warmup=2, total_steps=6, decay_factor=0.5, decay_every=1, cooldown=1)
>>> optim.param_groups[0]["lr"]
1
>>> scheduler(optim, 1)
>>> optim.param_groups[0]["lr"]
0.5
>>> scheduler(optim, 2)
>>> optim.param_groups[0]["lr"]
1.0
>>> scheduler(optim, 3)
>>> optim.param_groups[0]["lr"]
0.5
>>> scheduler(optim, 4)
>>> optim.param_groups[0]["lr"]
0.25
>>> scheduler(optim, 5)
>>> optim.param_groups[0]["lr"]
0.12500000000000003
>>> scheduler(optim, 6)
>>> optim.param_groups[0]["lr"]
0.0
"""
def __init__(
self,
lr,
warmup,
cooldown,
total_steps,
decay_factor=0.75,
decay_every=100000,
):
super(WarmCoolDecayLRSchedule, self).__init__()
self.base_lr = lr
self.warmup = warmup
self.cooldown = cooldown
self.total_steps = total_steps
self.power = math.log(decay_factor) / decay_every
    def __call__(self, opt, num_updates):
        """Updates the learning rate within the optimizer.
        Arguments
        ---------
        opt : optimizer
            The optimizer to update using this scheduler.
        num_updates : int
            Number of optimization steps performed so far.
        """
if num_updates < self.warmup:
# Warming up at the start of training.
lr = self.base_lr * num_updates / self.warmup
elif num_updates > self.total_steps - self.cooldown:
# Cooling down to 0. at the end of training.
base_lr = self.base_lr * math.exp(
self.power * (self.total_steps - self.cooldown)
)
decrease = base_lr / self.cooldown
n = num_updates - (self.total_steps - self.cooldown)
lr = base_lr - decrease * n
else:
# Slow decay for training.
lr = self.base_lr * math.exp(
self.power * (num_updates - self.warmup)
)
for param_group in opt.param_groups:
param_group["lr"] = lr
@checkpoints.mark_as_saver
def save(self, path):
"""Saves the current metrics on the specified path."""
data = {
"base_lr": self.base_lr,
"warmup": self.warmup,
"power": self.power,
"cooldown": self.cooldown,
"total_steps": self.total_steps,
}
torch.save(data, path)
@checkpoints.mark_as_loader
def load(self, path, end_of_epoch=False, device=None):
"""Loads the needed information."""
del end_of_epoch
del device
data = torch.load(path)
self.base_lr = data["base_lr"]
self.warmup = data["warmup"]
self.power = data["power"]
self.cooldown = data["cooldown"]
self.total_steps = data["total_steps"]
| 35,940 | 31.379279 | 119 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quantisers.py | """
Gumbel Softmax implementation with multiple groups possible.
Authors
* Rudolf A. Braun 2022
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class GumbelVectorQuantizer(nn.Module):
"""Vector quantization using gumbel softmax. Copied from fairseq implementation.
Arguments
---------
input_dim: int
Input dimension (channels).
num_vars: int
Number of quantized vectors per group.
    temp_tuple: tuple
        Temperature for training. This should be a tuple of 3 elements: (start, stop, decay factor).
groups: int
Number of groups for vector quantization.
vq_dim: int
Dimensionality of the resulting quantized vector.
Example
-------
>>> quantiser = GumbelVectorQuantizer(128, 100, (2.0, 0.25, 0.999995,), 2, 50 )
>>> inputs = torch.rand(10, 12, 128)
>>> output = quantiser(inputs)
>>> output["x"].shape
torch.Size([10, 12, 50])
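    The temperature can be annealed during training with ``update_temp`` (an
    illustrative sketch; the step count below is arbitrary):
    >>> quantiser.update_temp(100)
    >>> round(quantiser.curr_temp, 4)
    1.999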
"""
def __init__(self, input_dim, num_vars, temp_tuple, groups, vq_dim):
super().__init__()
self.groups = groups
self.input_dim = input_dim
self.num_vars = num_vars
self.vq_dim = vq_dim
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
self.vars = nn.Parameter(
torch.FloatTensor(1, groups * num_vars, var_dim)
)
nn.init.uniform_(self.vars)
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
assert len(temp_tuple) == 3, temp_tuple
self.max_temp, self.min_temp, self.temp_decay = temp_tuple
self.curr_temp = self.max_temp
self.max_ent = nn.Parameter(
torch.log(torch.tensor(float(self.num_vars * self.groups))),
requires_grad=False,
)
def update_temp(self, steps):
""" Update the temperature given the current step """
self.curr_temp = max(
self.max_temp * self.temp_decay ** steps, self.min_temp
)
def forward(self, x):
""" Forward the latent vector to obtain a quantised output """
result = {
"num_vars": self.num_vars * self.groups,
"temp": self.curr_temp,
}
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplex"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(
x.float(), tau=self.curr_temp, hard=True
).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
result["x"] = x
return result
| 3,699 | 28.83871 | 104 | py |
speechbrain | speechbrain-main/speechbrain/nnet/RNN.py | """Library implementing recurrent neural networks.
Authors
* Mirco Ravanelli 2020
* Ju-Chieh Chou 2020
* Jianyuan Zhong 2020
* Loren Lugosch 2020
"""
import torch
import logging
import torch.nn as nn
from speechbrain.nnet.attention import (
ContentBasedAttention,
LocationAwareAttention,
KeyValueAttention,
)
from torch import Tensor
from typing import Optional
logger = logging.getLogger(__name__)
def pack_padded_sequence(inputs, lengths):
"""Returns packed speechbrain-formatted tensors.
Arguments
---------
inputs : torch.Tensor
The sequences to pack.
lengths : torch.Tensor
The length of each sequence.
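    Example
    -------
    A minimal sketch (shapes are arbitrary; lengths are relative, as used
    throughout speechbrain):
    >>> x = torch.randn(2, 5, 3)
    >>> packed = pack_padded_sequence(x, torch.tensor([1.0, 0.6]))
    >>> packed.data.shape
    torch.Size([8, 3])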
"""
lengths = (lengths * inputs.size(1)).cpu()
return torch.nn.utils.rnn.pack_padded_sequence(
inputs, lengths, batch_first=True, enforce_sorted=False
)
def pad_packed_sequence(inputs):
"""Returns speechbrain-formatted tensor from packed sequences.
Arguments
---------
inputs : torch.nn.utils.rnn.PackedSequence
An input set of sequences to convert to a tensor.
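    Example
    -------
    A minimal round-trip sketch (shapes are arbitrary):
    >>> x = torch.randn(2, 5, 3)
    >>> packed = pack_padded_sequence(x, torch.tensor([1.0, 1.0]))
    >>> pad_packed_sequence(packed).shape
    torch.Size([2, 5, 3])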
"""
outputs, lengths = torch.nn.utils.rnn.pad_packed_sequence(
inputs, batch_first=True
)
return outputs
class RNN(torch.nn.Module):
"""This function implements a vanilla RNN.
It accepts in input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
nonlinearity : str
Type of nonlinearity (tanh, relu).
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
Example
-------
>>> inp_tensor = torch.rand([4, 10, 20])
>>> net = RNN(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor, _ = net(inp_tensor)
    >>> out_tensor.shape
torch.Size([4, 10, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
nonlinearity="relu",
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
bidirectional=False,
):
super().__init__()
self.reshape = False
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[2:]))
self.rnn = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
bias=bias,
batch_first=True,
nonlinearity=nonlinearity,
)
if re_init:
rnn_init(self.rnn)
def forward(self, x, hx=None, lengths=None):
"""Returns the output of the vanilla RNN.
Arguments
---------
x : torch.Tensor
Input tensor.
hx : torch.Tensor
Starting hidden state.
lengths : torch.Tensor
Relative lengths of the input signals.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# Flatten params for data parallel
self.rnn.flatten_parameters()
# Pack sequence for proper RNN handling of padding
if lengths is not None:
x = pack_padded_sequence(x, lengths)
# Support custom initial state
if hx is not None:
output, hn = self.rnn(x, hx=hx)
else:
output, hn = self.rnn(x)
# Unpack the packed sequence
if lengths is not None:
output = pad_packed_sequence(output)
return output, hn
class LSTM(torch.nn.Module):
"""This function implements a basic LSTM.
It accepts in input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
        If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
Example
-------
>>> inp_tensor = torch.rand([4, 10, 20])
>>> net = LSTM(hidden_size=5, input_shape=inp_tensor.shape)
    >>> out_tensor, _ = net(inp_tensor)
    >>> out_tensor.shape
torch.Size([4, 10, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
bidirectional=False,
):
super().__init__()
self.reshape = False
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[2:])).item()
self.rnn = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
bias=bias,
batch_first=True,
)
if re_init:
rnn_init(self.rnn)
def forward(self, x, hx=None, lengths=None):
"""Returns the output of the LSTM.
Arguments
---------
x : torch.Tensor
Input tensor.
hx : torch.Tensor
Starting hidden state.
lengths : torch.Tensor
Relative length of the input signals.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# Flatten params for data parallel
self.rnn.flatten_parameters()
# Pack sequence for proper RNN handling of padding
if lengths is not None:
x = pack_padded_sequence(x, lengths)
# Support custom initial state
if hx is not None:
output, hn = self.rnn(x, hx=hx)
else:
output, hn = self.rnn(x)
# Unpack the packed sequence
if lengths is not None:
output = pad_packed_sequence(output)
return output, hn
class GRU(torch.nn.Module):
""" This function implements a basic GRU.
It accepts input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
    dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
Example
-------
>>> inp_tensor = torch.rand([4, 10, 20])
>>> net = GRU(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor, _ = net(inp_tensor)
    >>> out_tensor.shape
torch.Size([4, 10, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
bidirectional=False,
):
super().__init__()
self.reshape = False
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[2:])).item()
self.rnn = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
bias=bias,
batch_first=True,
)
if re_init:
rnn_init(self.rnn)
def forward(self, x, hx=None, lengths=None):
"""Returns the output of the GRU.
Arguments
---------
x : torch.Tensor
Input tensor.
hx : torch.Tensor
Starting hidden state.
lengths : torch.Tensor
Relative length of the input signals.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# Flatten params for data parallel
self.rnn.flatten_parameters()
# Pack sequence for proper RNN handling of padding
if lengths is not None:
x = pack_padded_sequence(x, lengths)
# Support custom initial state
if hx is not None:
output, hn = self.rnn(x, hx=hx)
else:
output, hn = self.rnn(x)
# Unpack the packed sequence
if lengths is not None:
output = pad_packed_sequence(output)
return output, hn
class RNNCell(nn.Module):
""" This class implements a basic RNN Cell for a timestep of input,
while RNN() takes the whole sequence as input.
It is designed for an autoregressive decoder (ex. attentional decoder),
which takes one input at a time.
Using torch.nn.RNNCell() instead of torch.nn.RNN() to reduce VRAM
consumption.
It accepts in input tensors formatted as (batch, fea).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
        If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
Example
-------
>>> inp_tensor = torch.rand([4, 20])
>>> net = RNNCell(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor, _ = net(inp_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
nonlinearity="tanh",
):
super(RNNCell, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[1:]))
kwargs = {
"input_size": input_size,
"hidden_size": self.hidden_size,
"bias": bias,
"nonlinearity": nonlinearity,
}
self.rnn_cells = nn.ModuleList([torch.nn.RNNCell(**kwargs)])
kwargs["input_size"] = self.hidden_size
for i in range(self.num_layers - 1):
self.rnn_cells.append(torch.nn.RNNCell(**kwargs))
self.dropout_layers = nn.ModuleList(
[torch.nn.Dropout(p=dropout) for _ in range(self.num_layers - 1)]
)
if re_init:
rnn_init(self.rnn_cells)
def forward(self, x, hx=None):
"""Returns the output of the RNNCell.
Arguments
---------
x : torch.Tensor
The input of RNNCell.
hx : torch.Tensor
The hidden states of RNNCell.
"""
# if not provided, initialized with zeros
if hx is None:
hx = x.new_zeros(self.num_layers, x.shape[0], self.hidden_size)
h = self.rnn_cells[0](x, hx[0])
hidden_lst = [h]
for i in range(1, self.num_layers):
drop_h = self.dropout_layers[i - 1](h)
h = self.rnn_cells[i](drop_h, hx[i])
hidden_lst.append(h)
hidden = torch.stack(hidden_lst, dim=0)
return h, hidden
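
# --- Illustrative sketch (not part of the original module) ---
# RNNCell processes one timestep at a time, which is how an autoregressive
# decoder would use it. Unrolling it manually over a sequence:
def _example_rnncell_unroll():
    import torch

    x = torch.rand([4, 10, 20])  # (batch, time, fea)
    cell = RNNCell(hidden_size=5, input_size=20, num_layers=2)
    hx = None
    outputs = []
    for t in range(x.shape[1]):
        out, hx = cell(x[:, t], hx)  # out: (batch, hidden_size)
        outputs.append(out)
    return torch.stack(outputs, dim=1).shape  # torch.Size([4, 10, 5])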
class GRUCell(nn.Module):
""" This class implements a basic GRU Cell for a timestep of input,
while GRU() takes the whole sequence as input.
It is designed for an autoregressive decoder (ex. attentional decoder),
which takes one input at a time.
Using torch.nn.GRUCell() instead of torch.nn.GRU() to reduce VRAM
consumption.
It accepts input tensors formatted as (batch, fea).
Arguments
---------
hidden_size: int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
Number of layers to employ in the GRU architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
Example
-------
>>> inp_tensor = torch.rand([4, 20])
>>> net = GRUCell(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor, _ = net(inp_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
):
super(GRUCell, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[1:]))
kwargs = {
"input_size": input_size,
"hidden_size": self.hidden_size,
"bias": bias,
}
self.rnn_cells = nn.ModuleList([torch.nn.GRUCell(**kwargs)])
kwargs["input_size"] = self.hidden_size
for i in range(self.num_layers - 1):
self.rnn_cells.append(torch.nn.GRUCell(**kwargs))
self.dropout_layers = nn.ModuleList(
[torch.nn.Dropout(p=dropout) for _ in range(self.num_layers - 1)]
)
if re_init:
rnn_init(self.rnn_cells)
def forward(self, x, hx=None):
"""Returns the output of the GRUCell.
Arguments
---------
x : torch.Tensor
The input of GRUCell.
hx : torch.Tensor
The hidden states of GRUCell.
"""
# if not provided, initialized with zeros
if hx is None:
hx = x.new_zeros(self.num_layers, x.shape[0], self.hidden_size)
h = self.rnn_cells[0](x, hx[0])
hidden_lst = [h]
for i in range(1, self.num_layers):
drop_h = self.dropout_layers[i - 1](h)
h = self.rnn_cells[i](drop_h, hx[i])
hidden_lst.append(h)
hidden = torch.stack(hidden_lst, dim=0)
return h, hidden
class LSTMCell(nn.Module):
""" This class implements a basic LSTM Cell for a timestep of input,
while LSTM() takes the whole sequence as input.
It is designed for an autoregressive decoder (ex. attentional decoder),
which takes one input at a time.
Using torch.nn.LSTMCell() instead of torch.nn.LSTM() to reduce VRAM
consumption.
It accepts input tensors formatted as (batch, fea).
Arguments
---------
hidden_size: int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
Number of layers to employ in the LSTM architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
Example
-------
>>> inp_tensor = torch.rand([4, 20])
>>> net = LSTMCell(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor, _ = net(inp_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
):
super(LSTMCell, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[1:]))
kwargs = {
"input_size": input_size,
"hidden_size": self.hidden_size,
"bias": bias,
}
self.rnn_cells = nn.ModuleList([torch.nn.LSTMCell(**kwargs)])
kwargs["input_size"] = self.hidden_size
for i in range(self.num_layers - 1):
self.rnn_cells.append(torch.nn.LSTMCell(**kwargs))
self.dropout_layers = nn.ModuleList(
[torch.nn.Dropout(p=dropout) for _ in range(self.num_layers - 1)]
)
if re_init:
rnn_init(self.rnn_cells)
def forward(self, x, hx=None):
"""Returns the output of the LSTMCell.
Arguments
---------
x : torch.Tensor
The input of LSTMCell.
hx : torch.Tensor
The hidden states of LSTMCell.
"""
# if not provided, initialized with zeros
if hx is None:
hx = (
x.new_zeros(self.num_layers, x.shape[0], self.hidden_size),
x.new_zeros(self.num_layers, x.shape[0], self.hidden_size),
)
h, c = self.rnn_cells[0](x, (hx[0][0], hx[1][0]))
hidden_lst = [h]
cell_lst = [c]
for i in range(1, self.num_layers):
drop_h = self.dropout_layers[i - 1](h)
h, c = self.rnn_cells[i](drop_h, (hx[0][i], hx[1][i]))
hidden_lst.append(h)
cell_lst.append(c)
hidden = torch.stack(hidden_lst, dim=0)
cell = torch.stack(cell_lst, dim=0)
return h, (hidden, cell)
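
# --- Illustrative sketch (not part of the original module) ---
# Unlike RNNCell and GRUCell, LSTMCell returns its state as a (hidden, cell)
# tuple; the same tuple is passed back in as hx for the next timestep.
def _example_lstmcell_state_tuple():
    import torch

    x = torch.rand([4, 20])  # (batch, fea)
    cell = LSTMCell(hidden_size=5, input_size=20)
    out, state = cell(x)  # state = (hidden, cell), each (num_layers, batch, hidden_size)
    out, state = cell(x, hx=state)  # thread the state into the next step
    return out.shape  # torch.Size([4, 5])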
class AttentionalRNNDecoder(nn.Module):
"""This function implements RNN decoder model with attention.
This function implements different RNN models. It accepts in enc_states
tensors formatted as (batch, time, fea). In the case of 4d inputs
like (batch, time, fea, channel) the tensor is flattened in this way:
(batch, time, fea*channel).
Arguments
---------
rnn_type : str
Type of recurrent neural network to use (rnn, lstm, gru).
attn_type : str
Type of attention to use (content, location, keyvalue).
hidden_size : int
Number of the neurons.
attn_dim : int
Number of attention module internal and output neurons.
num_layers : int
Number of layers to employ in the RNN architecture.
enc_dim : int
    Size of the encoder feature vectors to be attended.
input_size : int
    Expected size of the relevant input dimension.
nonlinearity : str
Type of nonlinearity (tanh, relu). This option is active for
rnn and ligru models only. For lstm and gru tanh is used.
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
normalization : str
Type of normalization for the ligru model (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
scaling : float
A scaling factor to sharpen or smoothen the attention distribution.
channels : int
Number of channels for location-aware attention.
kernel_size : int
Size of the kernel for location-aware attention.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
Example
-------
>>> enc_states = torch.rand([4, 10, 20])
>>> wav_len = torch.rand([4])
>>> inp_tensor = torch.rand([4, 5, 6])
>>> net = AttentionalRNNDecoder(
... rnn_type="lstm",
... attn_type="content",
... hidden_size=7,
... attn_dim=5,
... num_layers=1,
... enc_dim=20,
... input_size=6,
... )
>>> out_tensor, attn = net(inp_tensor, enc_states, wav_len)
>>> out_tensor.shape
torch.Size([4, 5, 7])
"""
def __init__(
self,
rnn_type,
attn_type,
hidden_size,
attn_dim,
num_layers,
enc_dim,
input_size,
nonlinearity="relu",
re_init=True,
normalization="batchnorm",
scaling=1.0,
channels=None,
kernel_size=None,
bias=True,
dropout=0.0,
):
super(AttentionalRNNDecoder, self).__init__()
self.rnn_type = rnn_type.lower()
self.attn_type = attn_type.lower()
self.hidden_size = hidden_size
self.attn_dim = attn_dim
self.num_layers = num_layers
self.scaling = scaling
self.bias = bias
self.dropout = dropout
self.normalization = normalization
self.re_init = re_init
self.nonlinearity = nonlinearity
# only for location-aware attention
self.channels = channels
self.kernel_size = kernel_size
# Combining the context vector and output of rnn
self.proj = nn.Linear(
self.hidden_size + self.attn_dim, self.hidden_size
)
if self.attn_type == "content":
self.attn = ContentBasedAttention(
enc_dim=enc_dim,
dec_dim=self.hidden_size,
attn_dim=self.attn_dim,
output_dim=self.attn_dim,
scaling=self.scaling,
)
elif self.attn_type == "location":
self.attn = LocationAwareAttention(
enc_dim=enc_dim,
dec_dim=self.hidden_size,
attn_dim=self.attn_dim,
output_dim=self.attn_dim,
conv_channels=self.channels,
kernel_size=self.kernel_size,
scaling=self.scaling,
)
elif self.attn_type == "keyvalue":
self.attn = KeyValueAttention(
enc_dim=enc_dim,
dec_dim=self.hidden_size,
attn_dim=self.attn_dim,
output_dim=self.attn_dim,
)
else:
raise ValueError(f"{self.attn_type} is not implemented.")
self.drop = nn.Dropout(p=self.dropout)
# set dropout to 0 when only one layer
dropout = 0 if self.num_layers == 1 else self.dropout
# using cell implementation to reduce the usage of memory
if self.rnn_type == "rnn":
cell_class = RNNCell
elif self.rnn_type == "gru":
cell_class = GRUCell
elif self.rnn_type == "lstm":
cell_class = LSTMCell
else:
raise ValueError(f"{self.rnn_type} not implemented.")
kwargs = {
"input_size": input_size + self.attn_dim,
"hidden_size": self.hidden_size,
"num_layers": self.num_layers,
"bias": self.bias,
"dropout": dropout,
"re_init": self.re_init,
}
if self.rnn_type == "rnn":
kwargs["nonlinearity"] = self.nonlinearity
self.rnn = cell_class(**kwargs)
def forward_step(self, inp, hs, c, enc_states, enc_len):
"""One step of forward pass process.
Arguments
---------
inp : torch.Tensor
The input of current timestep.
hs : torch.Tensor or tuple of torch.Tensor
The cell state for RNN.
c : torch.Tensor
The context vector of previous timestep.
enc_states : torch.Tensor
The tensor generated by encoder, to be attended.
enc_len : torch.LongTensor
The actual length of encoder states.
Returns
-------
dec_out : torch.Tensor
The output tensor.
hs : torch.Tensor or tuple of torch.Tensor
The new cell state for RNN.
c : torch.Tensor
The context vector of the current timestep.
w : torch.Tensor
The weight of attention.
"""
cell_inp = torch.cat([inp, c], dim=-1)
cell_inp = self.drop(cell_inp)
cell_out, hs = self.rnn(cell_inp, hs)
c, w = self.attn(enc_states, enc_len, cell_out)
dec_out = torch.cat([c, cell_out], dim=1)
dec_out = self.proj(dec_out)
return dec_out, hs, c, w
def forward(self, inp_tensor, enc_states, wav_len):
"""This method implements the forward pass of the attentional RNN decoder.
Arguments
---------
inp_tensor : torch.Tensor
The input tensor for each timestep of the RNN decoder.
enc_states : torch.Tensor
The tensor to be attended by the decoder.
wav_len : torch.Tensor
This variable stores the relative length of the waveform.
Returns
-------
outputs : torch.Tensor
The output of the RNN decoder.
attn : torch.Tensor
The attention weight of each timestep.
"""
# calculating the actual length of enc_states
enc_len = torch.round(enc_states.shape[1] * wav_len).long()
# initialization
self.attn.reset()
c = torch.zeros(
enc_states.shape[0], self.attn_dim, device=enc_states.device
)
hs = None
# store predicted tokens
outputs_lst, attn_lst = [], []
for t in range(inp_tensor.shape[1]):
outputs, hs, c, w = self.forward_step(
inp_tensor[:, t], hs, c, enc_states, enc_len
)
outputs_lst.append(outputs)
attn_lst.append(w)
# [B, L_d, hidden_size]
outputs = torch.stack(outputs_lst, dim=1)
# [B, L_d, L_e]
attn = torch.stack(attn_lst, dim=1)
return outputs, attn
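
# --- Illustrative sketch (not part of the original module) ---
# Besides the full forward pass shown in the docstring, the decoder can be
# driven one token at a time via forward_step, e.g. inside a greedy or beam
# search loop. The sizes below are arbitrary example values.
def _example_decoder_single_step():
    import torch

    enc_states = torch.rand([4, 10, 20])
    enc_len = torch.full((4,), 10, dtype=torch.long)  # actual encoder lengths
    dec = AttentionalRNNDecoder(
        rnn_type="gru", attn_type="content", hidden_size=7,
        attn_dim=5, num_layers=1, enc_dim=20, input_size=6,
    )
    dec.attn.reset()
    c = torch.zeros(4, dec.attn_dim)  # initial context vector
    hs = None  # initial recurrent state
    inp = torch.rand([4, 6])  # e.g. embedding of the previously predicted token
    dec_out, hs, c, w = dec.forward_step(inp, hs, c, enc_states, enc_len)
    return dec_out.shape  # torch.Size([4, 7])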
class LiGRU(torch.nn.Module):
""" This function implements a Light GRU (liGRU).
LiGRU is single-gate GRU model based on batch-norm + relu
activations + recurrent dropout. For more info see:
"M. Ravanelli, P. Brakel, M. Omologo, Y. Bengio,
Light Gated Recurrent Units for Speech Recognition,
in IEEE Transactions on Emerging Topics in Computational Intelligence,
2018" (https://arxiv.org/abs/1803.10225)
This is a custom RNN. To speed it up, it must be compiled with
the torch just-in-time compiler (jit) right before using it.
You can compile it with:
compiled_model = torch.jit.script(model)
It accepts input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
input_shape : tuple
The shape of an example input.
nonlinearity : str
Type of nonlinearity (tanh, relu).
normalization : str
Type of normalization for the ligru model (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
re_init : bool
If True, orthogonal initialization is used for the recurrent weights.
Xavier initialization is used for the input connection weights.
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
Example
-------
>>> inp_tensor = torch.rand([4, 10, 20])
>>> net = LiGRU(input_shape=inp_tensor.shape, hidden_size=5)
>>> out_tensor, _ = net(inp_tensor)
>>> out_tensor.shape
torch.Size([4, 10, 5])
"""
def __init__(
self,
hidden_size,
input_shape,
nonlinearity="relu",
normalization="batchnorm",
num_layers=1,
bias=True,
dropout=0.0,
re_init=True,
bidirectional=False,
):
super().__init__()
self.hidden_size = hidden_size
self.nonlinearity = nonlinearity
self.num_layers = num_layers
self.normalization = normalization
self.bias = bias
self.dropout = dropout
self.re_init = re_init
self.bidirectional = bidirectional
self.reshape = False
# Computing the feature dimensionality
if len(input_shape) > 3:
self.reshape = True
self.fea_dim = float(torch.prod(torch.tensor(input_shape[2:])))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
if self.re_init:
rnn_init(self.rnn)
def _init_layers(self):
"""Initializes the layers of the liGRU."""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = LiGRU_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
nonlinearity=self.nonlinearity,
normalization=self.normalization,
bidirectional=self.bidirectional,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx: Optional[Tensor] = None):
"""Returns the output of the liGRU.
Arguments
---------
x : torch.Tensor
The input tensor.
hx : torch.Tensor
Starting hidden state.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# run ligru
output, hh = self._forward_ligru(x, hx=hx)
return output, hh
def _forward_ligru(self, x, hx: Optional[Tensor]):
"""Returns the output of the vanilla liGRU.
Arguments
---------
x : torch.Tensor
Input tensor.
hx : torch.Tensor
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, ligru_lay in enumerate(self.rnn):
if hx is not None:
x = ligru_lay(x, hx=hx[i])
else:
x = ligru_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
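
# --- Illustrative sketch (not part of the original module) ---
# As noted in the LiGRU docstring, the custom recurrence is meant to be
# compiled with TorchScript; the compiled module is used like the original one.
def _example_ligru_jit():
    import torch

    x = torch.rand([4, 10, 20])
    net = LiGRU(input_shape=x.shape, hidden_size=5)
    compiled = torch.jit.script(net)  # JIT-compile the custom recurrence
    out, h = compiled(x)
    return out.shape  # torch.Size([4, 10, 5])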
class LiGRU_Layer(torch.nn.Module):
""" This function implements Light-Gated Recurrent Units (ligru) layer.
Arguments
---------
input_size : int
Feature dimensionality of the input tensors.
batch_size : int
Batch size of the input tensors.
hidden_size : int
Number of output neurons.
num_layers : int
Number of layers to employ in the RNN architecture.
nonlinearity : str
Type of nonlinearity (tanh, relu).
normalization : str
Type of normalization (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
dropout : float
It is the dropout factor (must be between 0 and 1).
bidirectional : bool
if True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="relu",
normalization="batchnorm",
bidirectional=False,
):
super(LiGRU_Layer, self).__init__()
self.hidden_size = int(hidden_size)
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.w = nn.Linear(self.input_size, 2 * self.hidden_size, bias=False)
self.u = nn.Linear(self.hidden_size, 2 * self.hidden_size, bias=False)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initializing batch norm
self.normalize = False
if normalization == "batchnorm":
self.norm = nn.BatchNorm1d(2 * self.hidden_size, momentum=0.05)
self.normalize = True
elif normalization == "layernorm":
self.norm = torch.nn.LayerNorm(2 * self.hidden_size)
self.normalize = True
else:
# Normalization is disabled here. self.norm is only formally
# initialized to avoid jit issues.
self.norm = torch.nn.LayerNorm(2 * self.hidden_size)
self.normalize = True
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Setting the activation function
if nonlinearity == "tanh":
self.act = torch.nn.Tanh()
elif nonlinearity == "sin":
self.act = torch.sin
elif nonlinearity == "leaky_relu":
self.act = torch.nn.LeakyReLU()
else:
self.act = torch.nn.ReLU()
def forward(self, x, hx: Optional[Tensor] = None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the liGRU layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Apply batch normalization
if self.normalize:
w_bn = self.norm(w.reshape(w.shape[0] * w.shape[1], w.shape[2]))
w = w_bn.reshape(w.shape[0], w.shape[1], w.shape[2])
# Processing time steps
if hx is not None:
h = self._ligru_cell(w, hx)
else:
h = self._ligru_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _ligru_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
w : torch.Tensor
    Linearly transformed input (feed-forward part), for all timesteps.
ht : torch.Tensor
    Initial hidden state.
"""
hiddens = []
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
gates = w[:, k] + self.u(ht)
at, zt = gates.chunk(2, 1)
zt = torch.sigmoid(zt)
hcand = self.act(at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
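    # Note (descriptive comment, not part of the original module): the loop
    # above is the liGRU update from Ravanelli et al. (2018). With w_k the
    # pre-computed (and optionally normalized) feed-forward part at step k:
    #   a_k, z_k = chunk(w_k + U h_{k-1})        # candidate and update gate
    #   z_k = sigmoid(z_k)
    #   h_k = z_k * h_{k-1} + (1 - z_k) * act(a_k) * drop_mask
    # Recurrent dropout is applied to the candidate only, with a mask shared
    # across timesteps (act defaults to ReLU).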
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.register_buffer(
"drop_masks",
self.drop(torch.ones(self.N_drop_masks, self.hidden_size)).data,
)
self.register_buffer("drop_mask_te", torch.tensor([1.0]).float())
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
self.drop_mask_te = self.drop_mask_te.to(w.device)
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
and test. We also update the h_init and dropout masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size, device=x.device,
)
).data
class QuasiRNNLayer(torch.nn.Module):
"""Applies a single layer Quasi-Recurrent Neural Network (QRNN) to an
input sequence.
Arguments
---------
input_size : int
The number of expected features in the input x.
hidden_size : int
The number of features in the hidden state h. If not specified,
the input size is used.
zoneout : float
Whether to apply zoneout (i.e. failing to update elements in the
hidden state) to the hidden state updates. Default: 0.
output_gate : bool
If True, performs QRNN-fo (applying an output gate to the output).
If False, performs QRNN-f. Default: True.
Example
-------
>>> import torch
>>> model = QuasiRNNLayer(60, 256, bidirectional=True)
>>> a = torch.rand([10, 120, 60])
>>> b = model(a)
>>> b[0].shape
torch.Size([10, 120, 512])
"""
def __init__(
self,
input_size,
hidden_size,
bidirectional,
zoneout=0.0,
output_gate=True,
):
super().__init__()
self.hidden_size = hidden_size
self.zoneout = zoneout
self.output_gate = output_gate
self.bidirectional = bidirectional
stacked_hidden = (
3 * self.hidden_size if self.output_gate else 2 * self.hidden_size
)
self.w = torch.nn.Linear(input_size, stacked_hidden, True)
self.z_gate = nn.Tanh()
self.f_gate = nn.Sigmoid()
if self.output_gate:
self.o_gate = nn.Sigmoid()
def forgetMult(self, f, x, hidden):
# type: (Tensor, Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the hidden states for each time step.
Arguments
---------
f : torch.Tensor
    Forget gate activations for all timesteps.
x : torch.Tensor
    Candidate values (linearly transformed input) for all timesteps.
hidden : torch.Tensor
    Initial hidden state, if any.
"""
result = []
htm1 = hidden
hh = f * x
for i in range(hh.shape[0]):
h_t = hh[i, :, :]
ft = f[i, :, :]
if htm1 is not None:
h_t = h_t + (1 - ft) * htm1
result.append(h_t)
htm1 = h_t
return torch.stack(result)
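    # Note (descriptive comment, not part of the original module): the loop
    # above is the only sequential part of the QRNN; with f the gate and x the
    # candidate values z, it computes
    #   c_t = f_t * z_t + (1 - f_t) * c_{t-1}
    # while the gates themselves are computed for all timesteps in parallel by
    # the linear layer self.w.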
def split_gate_inputs(self, y):
# type: (Tensor) -> Tuple[Tensor, Tensor, Optional[Tensor]] # noqa F821
"""Splits the input gates."""
if self.output_gate:
z, f, o = y.chunk(3, dim=-1)
else:
z, f = y.chunk(2, dim=-1)
o = None
return z, f, o
def forward(self, x, hidden=None):
# type: (Tensor, Optional[Tensor]) -> Tuple[Tensor, Tensor] # noqa F821
"""Returns the output of the QRNN layer.
Arguments
---------
x : torch.Tensor
Input to transform linearly.
"""
if x.ndim == 4:
# if input is a 4d tensor (batch, time, channel1, channel2)
# reshape input to (batch, time, channel)
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# give a tensor of shape (time, batch, channel)
x = x.permute(1, 0, 2)
if self.bidirectional:
x_flipped = x.flip(0)
x = torch.cat([x, x_flipped], dim=1)
# note: this is equivalent to doing 1x1 convolution on the input
y = self.w(x)
z, f, o = self.split_gate_inputs(y)
z = self.z_gate(z)
f = self.f_gate(f)
if o is not None:
o = self.o_gate(o)
# If zoneout is specified, we perform dropout on the forget gates in F
# If an element of F is zero, that means the corresponding neuron
# keeps the old value
if self.zoneout:
if self.training:
mask = (
torch.empty(f.shape)
.bernoulli_(1 - self.zoneout)
.to(f.get_device())
).detach()
f = f * mask
else:
f = f * (1 - self.zoneout)
z = z.contiguous()
f = f.contiguous()
# Forget Mult
c = self.forgetMult(f, z, hidden)
# Apply output gate
if o is not None:
h = o * c
else:
h = c
# recover shape (batch, time, channel)
c = c.permute(1, 0, 2)
h = h.permute(1, 0, 2)
if self.bidirectional:
h_fwd, h_bwd = h.chunk(2, dim=0)
h_bwd = h_bwd.flip(1)
h = torch.cat([h_fwd, h_bwd], dim=2)
c_fwd, c_bwd = c.chunk(2, dim=0)
c_bwd = c_bwd.flip(1)
c = torch.cat([c_fwd, c_bwd], dim=2)
return h, c[-1, :, :]
class QuasiRNN(nn.Module):
"""This is a implementation for the Quasi-RNN.
https://arxiv.org/pdf/1611.01576.pdf
Part of the code is adapted from:
https://github.com/salesforce/pytorch-qrnn
Arguments
---------
hidden_size : int
The number of features in the hidden state h. If not specified,
the input size is used.
input_shape : tuple
The shape of an example input. Alternatively, use ``input_size``.
input_size : int
The size of the input. Alternatively, use ``input_shape``.
num_layers : int
The number of QRNN layers to produce.
zoneout : float
Whether to apply zoneout (i.e. failing to update elements in the
hidden state) to the hidden state updates. Default: 0.
output_gate : bool
If True, performs QRNN-fo (applying an output gate to the output).
If False, performs QRNN-f. Default: True.
Example
-------
>>> a = torch.rand([8, 120, 40])
>>> model = QuasiRNN(
... 256, num_layers=4, input_shape=a.shape, bidirectional=True
... )
>>> b, _ = model(a)
>>> b.shape
torch.Size([8, 120, 512])
"""
def __init__(
self,
hidden_size,
input_shape=None,
input_size=None,
num_layers=1,
bias=True,
batch_first=False,
dropout=0,
bidirectional=False,
**kwargs,
):
assert bias is True, "Removing underlying bias is not yet supported"
super().__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.dropout = dropout if dropout > 0 else None
self.kwargs = kwargs
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size.")
# Computing the feature dimensionality
if input_size is None:
if len(input_shape) > 3:
self.reshape = True
input_size = torch.prod(torch.tensor(input_shape[2:]))
layers = []
for layer in range(self.num_layers):
layers.append(
QuasiRNNLayer(
input_size
if layer == 0
else self.hidden_size * 2
if self.bidirectional
else self.hidden_size,
self.hidden_size,
self.bidirectional,
**self.kwargs,
)
)
self.qrnn = torch.nn.ModuleList(layers)
if self.dropout:
self.dropout = torch.nn.Dropout(self.dropout)
def forward(self, x, hidden=None):
"""Applies the QuasiRNN to the input tensor x."""
next_hidden = []
for i, layer in enumerate(self.qrnn):
x, h = layer(x, None if hidden is None else hidden[i])
next_hidden.append(h)
if self.dropout and i < len(self.qrnn) - 1:
x = self.dropout(x)
hidden = torch.cat(next_hidden, 0).view(
self.num_layers, *next_hidden[0].shape[-2:]
)
return x, hidden
def rnn_init(module):
"""This function is used to initialize the RNN weight.
Recurrent connection: orthogonal initialization.
Arguments
---------
module: torch.nn.Module
Recurrent neural network module.
Example
-------
>>> inp_tensor = torch.rand([4, 10, 20])
>>> net = RNN(hidden_size=5, input_shape=inp_tensor.shape)
>>> out_tensor = net(inp_tensor)
>>> rnn_init(net)
"""
for name, param in module.named_parameters():
if "weight_hh" in name or ".u.weight" in name:
nn.init.orthogonal_(param)
| 49,939 | 29.999379 | 82 | py |
speechbrain | speechbrain-main/speechbrain/nnet/linear.py | """Library implementing linear transformation.
Authors
* Mirco Ravanelli 2020
* Davide Borra 2021
"""
import torch
import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
class Linear(torch.nn.Module):
"""Computes a linear transformation y = wx + b.
Arguments
---------
n_neurons : int
It is the number of output neurons (i.e, the dimensionality of the
output).
input_shape: tuple
It is the shape of the input tensor.
input_size: int
Size of the input tensor.
bias : bool
If True, the additive bias b is adopted.
combine_dims : bool
If True and the input is 4D, combine 3rd and 4th dimensions of input.
Example
-------
>>> inputs = torch.rand(10, 50, 40)
>>> lin_t = Linear(input_shape=(10, 50, 40), n_neurons=100)
>>> output = lin_t(inputs)
>>> output.shape
torch.Size([10, 50, 100])
"""
def __init__(
self,
n_neurons,
input_shape=None,
input_size=None,
bias=True,
combine_dims=False,
):
super().__init__()
self.combine_dims = combine_dims
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size")
if input_size is None:
input_size = input_shape[-1]
if len(input_shape) == 4 and self.combine_dims:
input_size = input_shape[2] * input_shape[3]
# Weights are initialized following pytorch approach
self.w = nn.Linear(input_size, n_neurons, bias=bias)
def forward(self, x):
"""Returns the linear transformation of input tensor.
Arguments
---------
x : torch.Tensor
Input to transform linearly.
"""
if x.ndim == 4 and self.combine_dims:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
wx = self.w(x)
return wx
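
# --- Illustrative sketch (not part of the original module) ---
# With combine_dims=True, a 4D input (batch, time, channel1, channel2) is
# flattened to (batch, time, channel1 * channel2) before the transformation.
def _example_linear_combine_dims():
    import torch

    x = torch.rand(10, 50, 8, 5)
    lin = Linear(n_neurons=100, input_shape=x.shape, combine_dims=True)
    return lin(x).shape  # torch.Size([10, 50, 100])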
class LinearWithConstraint(Linear):
"""Computes a linear transformation y = wx + b with kernel max-norm constaint.
This corresponds to set an upper bound for the kernel norm.
Arguments
---------
n_neurons : int
It is the number of output neurons (i.e, the dimensionality of the
output).
input_shape: tuple
It is the shape of the input tensor.
input_size: int
Size of the input tensor.
bias : bool
If True, the additive bias b is adopted.
combine_dims : bool
If True and the input is 4D, combine 3rd and 4th dimensions of input.
max_norm : float
Kernel max-norm
Example
-------
>>> inputs = torch.rand(100,)
>>> max_norm = 1.
>>> lin_t_constrained = LinearWithConstraint(input_size=inputs.shape[0], n_neurons=2, max_norm=max_norm)
>>> output = lin_t_constrained(inputs)
>>> torch.any(torch.norm(lin_t_constrained.w.weight.data, p=2, dim=0)>max_norm)
tensor(False)
"""
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(LinearWithConstraint, self).__init__(*args, **kwargs)
def forward(self, x):
"""Returns the linear transformation of input tensor.
Arguments
---------
x : torch.Tensor
Input to transform linearly.
"""
self.w.weight.data = torch.renorm(
self.w.weight.data, p=2, dim=0, maxnorm=self.max_norm
)
return super(LinearWithConstraint, self).forward(x)
| 3,506 | 27.056 | 107 | py |
speechbrain | speechbrain-main/speechbrain/nnet/dropout.py | """Library implementing dropout.
Authors
* Mirco Ravanelli 2020
"""
import torch # noqa: F401
import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
class Dropout2d(nn.Module):
"""This function implements dropout 2d. It randomly put zeros on
entire channels.
Arguments
---------
drop_rate : float
It is the dropout factor (between 0 and 1).
inplace : bool
If True, it uses inplace operations.
Example
-------
>>> drop = Dropout2d(drop_rate=0.5)
>>> inputs = torch.rand(10, 50, 40)
>>> output=drop(inputs)
>>> output.shape
torch.Size([10, 50, 40])
"""
def __init__(
self, drop_rate, inplace=False,
):
super().__init__()
self.drop_rate = drop_rate
self.inplace = inplace
self.drop = nn.Dropout2d(p=self.drop_rate, inplace=self.inplace)
def forward(self, x):
"""Applies dropout 2d to the input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channel1, channel2)
input to normalize. 4d tensors are expected.
"""
# time must be the last
x = x.transpose(1, 2).transpose(2, -1)
x_drop = self.drop(x)
x_drop = x_drop.transpose(-1, 1).transpose(2, -1)
return x_drop
| 1,320 | 22.175439 | 72 | py |
speechbrain | speechbrain-main/speechbrain/nnet/activations.py | """Library implementing activation functions.
Authors
* Mirco Ravanelli 2020
* Jianyuan Zhong 2020
"""
import torch
import logging
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class Softmax(torch.nn.Module):
"""Computes the softmax of a 2d, 3d, or 4d input tensor.
Arguments
---------
apply_log : bool
Whether to apply the log function before softmax.
dim : int
The dimension along which softmax is applied.
Example
-------
>>> classifier = Softmax()
>>> inputs = torch.rand(10, 50, 40)
>>> output = classifier(inputs)
>>> output.shape
torch.Size([10, 50, 40])
"""
def __init__(self, apply_log=False, dim=-1):
super().__init__()
if apply_log:
self.act = torch.nn.LogSoftmax(dim=dim)
else:
self.act = torch.nn.Softmax(dim=dim)
def forward(self, x):
"""Returns the softmax of the input tensor.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
# Reshaping the tensors
dims = x.shape
if len(dims) == 3:
x = x.reshape(dims[0] * dims[1], dims[2])
if len(dims) == 4:
x = x.reshape(dims[0] * dims[1], dims[2], dims[3])
x_act = self.act(x)
# Retrieving the original shape format
if len(dims) == 3:
x_act = x_act.reshape(dims[0], dims[1], dims[2])
if len(dims) == 4:
x_act = x_act.reshape(dims[0], dims[1], dims[2], dims[3])
return x_act
class GumbelSoftmax(torch.nn.Module):
"""Samples from the Gumbel-Softmax distribution and optionally discretizes.
Reference: https://arxiv.org/abs/1611.00712, https://arxiv.org/abs/1611.01144
Arguments
----------
tau: float
non-negative scalar temperature
hard: bool
if True, the returned samples will be discretized as one-hot vectors, but will be differentiated as if it is the soft sample in autograd
dim: int
A dimension along which softmax will be computed (default: -1).
Example
-------
>>> x = torch.randn((8, 40, 120))
>>> act = GumbelSoftmax(0.8, True)
>>> x = act(x)
"""
def __init__(self, tau, hard=False, apply_log=False):
super().__init__()
self.tau = tau
self.hard = hard
self.apply_log = apply_log
def forward(self, x):
"""Returns the Gumbel softmax of the input tensor.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.apply_log:
return torch.log(F.gumbel_softmax(x, tau=self.tau, hard=self.hard))
return F.gumbel_softmax(x, tau=self.tau, hard=self.hard)
class Swish(torch.nn.Module):
""" The class implements the Swish activation function from
https://arxiv.org/pdf/2005.03191.pdf
given input x. Swish(x) = x * sigmoid(beta * x) = x / (1 + exp(-beta * x))
Arguments
---------
beta: float
Beta value.
Example
-------
>>> x = torch.randn((8, 40, 120))
>>> act = Swish()
>>> x = act(x)
"""
def __init__(self, beta=1):
super().__init__()
self.beta = beta
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
"""Returns the Swished input tensor.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
return x * self.sigmoid(self.beta * x)
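
# --- Illustrative sketch (not part of the original module) ---
# With beta=1, Swish(x) = x * sigmoid(x) is the SiLU activation, so it can be
# cross-checked against torch.nn.functional.silu.
def _example_swish_matches_silu():
    import torch

    x = torch.randn(8, 40, 120)
    assert torch.allclose(Swish()(x), torch.nn.functional.silu(x), atol=1e-6)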
| 3,474 | 23.300699 | 144 | py |
speechbrain | speechbrain-main/speechbrain/nnet/normalization.py | """Library implementing normalization.
Authors
* Mirco Ravanelli 2020
* Guillermo Cámbara 2021
* Sarthak Yadav 2022
"""
import torch
import torch.nn as nn
class BatchNorm1d(nn.Module):
"""Applies 1d batch normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input. Alternatively, use ``input_size``.
input_size : int
The expected size of the input. Alternatively, use ``input_shape``.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
momentum : float
It is a value used for the running_mean and running_var computation.
affine : bool
When set to True, the affine parameters are learned.
track_running_stats : bool
When set to True, this module tracks the running mean and variance,
and when set to False, this module does not track such statistics.
combine_batch_time : bool
When True, it combines the batch and time axes.
Example
-------
>>> input = torch.randn(100, 10)
>>> norm = BatchNorm1d(input_shape=input.shape)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 10])
"""
def __init__(
self,
input_shape=None,
input_size=None,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
combine_batch_time=False,
skip_transpose=False,
):
super().__init__()
self.combine_batch_time = combine_batch_time
self.skip_transpose = skip_transpose
if input_size is None and skip_transpose:
input_size = input_shape[1]
elif input_size is None:
input_size = input_shape[-1]
self.norm = nn.BatchNorm1d(
input_size,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, [channels])
input to normalize. 2d or 3d tensors are expected in input
4d tensors can be used when combine_dims=True.
"""
shape_or = x.shape
if self.combine_batch_time:
if x.ndim == 3:
x = x.reshape(shape_or[0] * shape_or[1], shape_or[2])
else:
x = x.reshape(
shape_or[0] * shape_or[1], shape_or[3], shape_or[2]
)
elif not self.skip_transpose:
x = x.transpose(-1, 1)
x_n = self.norm(x)
if self.combine_batch_time:
x_n = x_n.reshape(shape_or)
elif not self.skip_transpose:
x_n = x_n.transpose(1, -1)
return x_n
class BatchNorm2d(nn.Module):
"""Applies 2d batch normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input. Alternatively, use ``input_size``.
input_size : int
The expected size of the input. Alternatively, use ``input_shape``.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
momentum : float
It is a value used for the running_mean and running_var computation.
affine : bool
When set to True, the affine parameters are learned.
track_running_stats : bool
When set to True, this module tracks the running mean and variance,
and when set to False, this module does not track such statistics.
Example
-------
>>> input = torch.randn(100, 10, 5, 20)
>>> norm = BatchNorm2d(input_shape=input.shape)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 10, 5, 20])
"""
def __init__(
self,
input_shape=None,
input_size=None,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
):
super().__init__()
if input_shape is None and input_size is None:
raise ValueError("Expected input_shape or input_size as input")
if input_size is None:
input_size = input_shape[-1]
self.norm = nn.BatchNorm2d(
input_size,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channel1, channel2)
input to normalize. 4d tensors are expected.
"""
x = x.transpose(-1, 1)
x_n = self.norm(x)
x_n = x_n.transpose(1, -1)
return x_n
class LayerNorm(nn.Module):
"""Applies layer normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
elementwise_affine : bool
If True, this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
Example
-------
>>> input = torch.randn(100, 101, 128)
>>> norm = LayerNorm(input_shape=input.shape)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 101, 128])
"""
def __init__(
self,
input_size=None,
input_shape=None,
eps=1e-05,
elementwise_affine=True,
):
super().__init__()
self.eps = eps
self.elementwise_affine = elementwise_affine
if input_shape is not None:
input_size = input_shape[2:]
self.norm = torch.nn.LayerNorm(
input_size,
eps=self.eps,
elementwise_affine=self.elementwise_affine,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channels)
input to normalize. 3d or 4d tensors are expected.
"""
return self.norm(x)
class InstanceNorm1d(nn.Module):
"""Applies 1d instance normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input. Alternatively, use ``input_size``.
input_size : int
The expected size of the input. Alternatively, use ``input_shape``.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
momentum : float
It is a value used for the running_mean and running_var computation.
track_running_stats : bool
When set to True, this module tracks the running mean and variance,
and when set to False, this module does not track such statistics.
affine : bool
A boolean value that when set to True, this module has learnable
affine parameters, initialized the same way as done for
batch normalization. Default: False.
Example
-------
>>> input = torch.randn(100, 10, 20)
>>> norm = InstanceNorm1d(input_shape=input.shape)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 10, 20])
"""
def __init__(
self,
input_shape=None,
input_size=None,
eps=1e-05,
momentum=0.1,
track_running_stats=True,
affine=False,
):
super().__init__()
if input_shape is None and input_size is None:
raise ValueError("Expected input_shape or input_size as input")
if input_size is None:
input_size = input_shape[-1]
self.norm = nn.InstanceNorm1d(
input_size,
eps=eps,
momentum=momentum,
track_running_stats=track_running_stats,
affine=affine,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channels)
input to normalize. 3d tensors are expected.
"""
x = x.transpose(-1, 1)
x_n = self.norm(x)
x_n = x_n.transpose(1, -1)
return x_n
class InstanceNorm2d(nn.Module):
"""Applies 2d instance normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input. Alternatively, use ``input_size``.
input_size : int
The expected size of the input. Alternatively, use ``input_shape``.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
momentum : float
It is a value used for the running_mean and running_var computation.
track_running_stats : bool
When set to True, this module tracks the running mean and variance,
and when set to False, this module does not track such statistics.
affine : bool
A boolean value that when set to True, this module has learnable
affine parameters, initialized the same way as done for
batch normalization. Default: False.
Example
-------
>>> input = torch.randn(100, 10, 20, 2)
>>> norm = InstanceNorm2d(input_shape=input.shape)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 10, 20, 2])
"""
def __init__(
self,
input_shape=None,
input_size=None,
eps=1e-05,
momentum=0.1,
track_running_stats=True,
affine=False,
):
super().__init__()
if input_shape is None and input_size is None:
raise ValueError("Expected input_shape or input_size as input")
if input_size is None:
input_size = input_shape[-1]
self.norm = nn.InstanceNorm2d(
input_size,
eps=eps,
momentum=momentum,
track_running_stats=track_running_stats,
affine=affine,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channel1, channel2)
input to normalize. 4d tensors are expected.
"""
x = x.transpose(-1, 1)
x_n = self.norm(x)
x_n = x_n.transpose(1, -1)
return x_n
class GroupNorm(nn.Module):
"""Applies group normalization to the input tensor.
Arguments
---------
input_shape : tuple
The expected shape of the input. Alternatively, use ``input_size``.
input_size : int
The expected size of the input. Alternatively, use ``input_shape``.
num_groups : int
Number of groups to separate the channels into.
eps : float
This value is added to std deviation estimation to improve the numerical
stability.
affine : bool
A boolean value that when set to True, this module has learnable per-channel
affine parameters initialized to ones (for weights) and zeros (for biases).
Example
-------
>>> input = torch.randn(100, 101, 128)
>>> norm = GroupNorm(input_size=128, num_groups=128)
>>> output = norm(input)
>>> output.shape
torch.Size([100, 101, 128])
"""
def __init__(
self,
input_shape=None,
input_size=None,
num_groups=None,
eps=1e-05,
affine=True,
):
super().__init__()
self.eps = eps
self.affine = affine
if input_shape is None and input_size is None:
raise ValueError("Expected input_shape or input_size as input")
if num_groups is None:
raise ValueError("Expected num_groups as input")
if input_shape is not None:
input_size = input_shape[-1]
self.norm = torch.nn.GroupNorm(
num_groups, input_size, eps=self.eps, affine=self.affine,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channels)
input to normalize. 3d or 4d tensors are expected.
"""
x = x.transpose(-1, 1)
x_n = self.norm(x)
x_n = x_n.transpose(1, -1)
return x_n
class ExponentialMovingAverage(nn.Module):
"""
Applies a learnable exponential moving average, as required by the learnable PCEN layer.
Arguments
---------
input_size : int
The expected size of the input.
coeff_init: float
Initial smoothing coefficient value
per_channel: bool
Controls whether the smoothing coefficients are learned
independently for every input channel.
trainable: bool
Whether to learn the PCEN parameters or keep them fixed.
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> inp_tensor = torch.rand([10, 50, 40])
>>> pcen = ExponentialMovingAverage(40)
>>> out_tensor = pcen(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 50, 40])
"""
def __init__(
self,
input_size: int,
coeff_init: float = 0.04,
per_channel: bool = False,
trainable: bool = True,
skip_transpose: bool = False,
):
super(ExponentialMovingAverage, self).__init__()
self._coeff_init = coeff_init
self._per_channel = per_channel
self.skip_transpose = skip_transpose
self.trainable = trainable
weights = (
torch.ones(input_size,) if self._per_channel else torch.ones(1,)
)
self._weights = nn.Parameter(
weights * self._coeff_init, requires_grad=trainable
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channels)
input to normalize.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
w = torch.clamp(self._weights, min=0.0, max=1.0)
initial_state = x[:, :, 0]
def scan(init_state, x, w):
"""Loops and accumulates."""
x = x.permute(2, 0, 1)
acc = init_state
results = []
for ix in range(x.shape[0]):
acc = (w * x[ix]) + ((1.0 - w) * acc)
results.append(acc.unsqueeze(0))
results = torch.cat(results, dim=0)
results = results.permute(1, 2, 0)
return results
output = scan(initial_state, x, w)
if not self.skip_transpose:
output = output.transpose(1, -1)
return output
class PCEN(nn.Module):
"""
This class implements a learnable Per-channel energy normalization (PCEN) layer, supporting both
the original PCEN as specified in [1] and sPCEN as specified in [2].
[1] Yuxuan Wang, Pascal Getreuer, Thad Hughes, Richard F. Lyon, Rif A. Saurous, "Trainable Frontend For
Robust and Far-Field Keyword Spotting", in Proc of ICASSP 2017 (https://arxiv.org/abs/1607.05666)
[2] Neil Zeghidour, Olivier Teboul, Félix de Chaumont Quitry & Marco Tagliasacchi, "LEAF: A LEARNABLE FRONTEND
FOR AUDIO CLASSIFICATION", in Proc of ICLR 2021 (https://arxiv.org/abs/2101.08596)
The default argument values correspond with those used by [2].
Arguments
---------
input_size : int
The expected size of the input.
alpha: float
specifies alpha coefficient for PCEN
smooth_coef: float
specified smooth coefficient for PCEN
delta: float
specifies delta coefficient for PCEN
root: float
specifies root coefficient for PCEN
floor: float
specifies floor coefficient for PCEN
trainable: bool
Whether to learn the PCEN parameters or keep them fixed.
per_channel_smooth_coef: bool
whether to learn independent smooth coefficients for every channel.
when True, essentially using sPCEN from [2]
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> inp_tensor = torch.rand([10, 50, 40])
>>> pcen = PCEN(40, alpha=0.96) # sPCEN
>>> out_tensor = pcen(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 50, 40])
"""
def __init__(
self,
input_size,
alpha: float = 0.96,
smooth_coef: float = 0.04,
delta: float = 2.0,
root: float = 2.0,
floor: float = 1e-12,
trainable: bool = True,
per_channel_smooth_coef: bool = True,
skip_transpose: bool = False,
):
super(PCEN, self).__init__()
self._smooth_coef = smooth_coef
self._floor = floor
self._per_channel_smooth_coef = per_channel_smooth_coef
self.skip_transpose = skip_transpose
self.alpha = nn.Parameter(
torch.ones(input_size) * alpha, requires_grad=trainable
)
self.delta = nn.Parameter(
torch.ones(input_size) * delta, requires_grad=trainable
)
self.root = nn.Parameter(
torch.ones(input_size) * root, requires_grad=trainable
)
self.ema = ExponentialMovingAverage(
input_size,
coeff_init=self._smooth_coef,
per_channel=self._per_channel_smooth_coef,
skip_transpose=True,
trainable=trainable,
)
def forward(self, x):
"""Returns the normalized input tensor.
Arguments
---------
x : torch.Tensor (batch, time, channels)
input to normalize.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
alpha = torch.min(
self.alpha, torch.tensor(1.0, dtype=x.dtype, device=x.device)
)
root = torch.max(
self.root, torch.tensor(1.0, dtype=x.dtype, device=x.device)
)
ema_smoother = self.ema(x)
one_over_root = 1.0 / root
output = (
x / (self._floor + ema_smoother) ** alpha.view(1, -1, 1)
+ self.delta.view(1, -1, 1)
) ** one_over_root.view(1, -1, 1) - self.delta.view(
1, -1, 1
) ** one_over_root.view(
1, -1, 1
)
if not self.skip_transpose:
output = output.transpose(1, -1)
return output
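
# --- Note (descriptive comment, not part of the original module) ---
# Written out, the transform computed in PCEN.forward is
#   PCEN(x) = (x / (floor + M)**alpha + delta)**(1/root) - delta**(1/root)
# where M is the exponential moving average of x along time (computed by
# ExponentialMovingAverage above), alpha is clamped to <= 1, root is clamped
# to >= 1, and alpha, delta, root are per-channel learnable parameters.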
| 18,623 | 28.942122 | 118 | py |
speechbrain | speechbrain-main/speechbrain/nnet/attention.py | """Library implementing attention modules.
Authors
* Ju-Chieh Chou 2020
* Jianyuan Zhong 2020
* Loren Lugosch 2020
* Samuele Cornell 2020
"""
import torch
import logging
import torch.nn as nn
import numpy as np
from typing import Optional
from speechbrain.dataio.dataio import length_to_mask
import torch.nn.functional as F
import math
logger = logging.getLogger(__name__)
class ContentBasedAttention(nn.Module):
""" This class implements content-based attention module for seq2seq
learning.
Reference: NEURAL MACHINE TRANSLATION BY JOINTLY LEARNING TO ALIGN
AND TRANSLATE, Bahdanau et.al. https://arxiv.org/pdf/1409.0473.pdf
Arguments
---------
enc_dim : int
    Size of the encoder feature vectors to be attended.
dec_dim : int
    Size of the decoder feature vectors (queries).
attn_dim : int
    Size of the attention feature.
output_dim : int
Size of the output context vector.
scaling : float
The factor controls the sharpening degree (default: 1.0).
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = ContentBasedAttention(enc_dim=20, dec_dim=25, attn_dim=30, output_dim=5)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(self, enc_dim, dec_dim, attn_dim, output_dim, scaling=1.0):
super(ContentBasedAttention, self).__init__()
self.mlp_enc = nn.Linear(enc_dim, attn_dim)
self.mlp_dec = nn.Linear(dec_dim, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.mlp_out = nn.Linear(enc_dim, output_dim)
self.scaling = scaling
self.softmax = nn.Softmax(dim=-1)
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in the attention module.
"""
self.enc_len = None
self.precomputed_enc_h = None
self.mask = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.precomputed_enc_h is None:
self.precomputed_enc_h = self.mlp_enc(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
)
dec_h = self.mlp_dec(dec_states.unsqueeze(1))
attn = self.mlp_attn(
torch.tanh(self.precomputed_enc_h + dec_h)
).squeeze(-1)
# mask the padded frames
attn = attn.masked_fill(self.mask == 0, -np.inf)
attn = self.softmax(attn * self.scaling)
# compute context vectors
# [B, 1, L] X [B, L, F]
context = torch.bmm(attn.unsqueeze(1), enc_states).squeeze(1)
context = self.mlp_out(context)
return context, attn
class LocationAwareAttention(nn.Module):
"""This class implements location-aware attention module for seq2seq learning.
Reference: Attention-Based Models for Speech Recognition, Chorowski et.al.
https://arxiv.org/pdf/1506.07503.pdf
Arguments
---------
enc_dim : int
    Size of the encoder feature vectors to be attended.
dec_dim : int
    Size of the decoder feature vectors (queries).
attn_dim : int
    Size of the attention feature.
output_dim : int
Size of the output context vector.
conv_channels : int
Number of channel for location feature.
kernel_size : int
Kernel size of convolutional layer for location feature.
scaling : float
The factor controls the sharpening degree (default: 1.0).
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = LocationAwareAttention(
... enc_dim=20,
... dec_dim=25,
... attn_dim=30,
... output_dim=5,
... conv_channels=10,
... kernel_size=100)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
precomputed_enc_h: Optional[torch.Tensor]
def __init__(
self,
enc_dim,
dec_dim,
attn_dim,
output_dim,
conv_channels,
kernel_size,
scaling=1.0,
):
super(LocationAwareAttention, self).__init__()
self.mlp_enc = nn.Linear(enc_dim, attn_dim)
self.mlp_dec = nn.Linear(dec_dim, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.conv_loc = nn.Conv1d(
1,
conv_channels,
kernel_size=2 * kernel_size + 1,
padding=kernel_size,
bias=False,
)
self.mlp_loc = nn.Linear(conv_channels, attn_dim)
self.mlp_attn = nn.Linear(attn_dim, 1, bias=False)
self.mlp_out = nn.Linear(enc_dim, output_dim)
self.scaling = scaling
self.softmax = nn.Softmax(dim=-1)
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in attention module.
"""
self.enc_len = None
self.precomputed_enc_h = None
self.mask = None
self.prev_attn = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.precomputed_enc_h is None:
self.precomputed_enc_h = self.mlp_enc(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
)
# multiply mask by 1/Ln for each row
self.prev_attn = self.mask * (1 / enc_len.float()).unsqueeze(1)
# compute location-aware features
# [B, 1, L] -> [B, C, L]
attn_conv = self.conv_loc(self.prev_attn.unsqueeze(1))
# [B, C, L] -> [B, L, C] -> [B, L, F]
attn_conv = self.mlp_loc(attn_conv.transpose(1, 2))
dec_h = self.mlp_dec(dec_states.unsqueeze(1))
attn = self.mlp_attn(
torch.tanh(self.precomputed_enc_h + dec_h + attn_conv)
).squeeze(-1)
# mask the padded frames
attn = attn.masked_fill(self.mask == 0, -np.inf)
attn = self.softmax(attn * self.scaling)
# set prev_attn to current attn for the next timestep
self.prev_attn = attn.detach()
# compute context vectors
# [B, 1, L] X [B, L, F]
context = torch.bmm(attn.unsqueeze(1), enc_states).squeeze(1)
context = self.mlp_out(context)
return context, attn
class KeyValueAttention(nn.Module):
""" This class implements a single-headed key-value attention module for seq2seq
learning.
Reference: "Attention Is All You Need" by Vaswani et al., sec. 3.2.1
Arguments
---------
enc_dim : int
Size of the encoder feature vectors from which keys and values are computed.
dec_dim : int
Size of the decoder feature vectors from which queries are computed.
attn_dim : int
Size of the attention feature.
output_dim : int
Size of the output context vector.
Example
-------
>>> enc_tensor = torch.rand([4, 10, 20])
>>> enc_len = torch.ones([4]) * 10
>>> dec_tensor = torch.rand([4, 25])
>>> net = KeyValueAttention(enc_dim=20, dec_dim=25, attn_dim=30, output_dim=5)
>>> out_tensor, out_weight = net(enc_tensor, enc_len, dec_tensor)
>>> out_tensor.shape
torch.Size([4, 5])
"""
def __init__(self, enc_dim, dec_dim, attn_dim, output_dim):
super(KeyValueAttention, self).__init__()
self.key_linear = nn.Linear(enc_dim, attn_dim)
self.query_linear = nn.Linear(dec_dim, attn_dim)
self.value_linear = nn.Linear(enc_dim, output_dim)
self.scaling = torch.sqrt(torch.tensor(attn_dim).float())
# reset the encoder states, lengths and masks
self.reset()
def reset(self):
"""Reset the memory in the attention module.
"""
self.values = None
self.keys = None
self.mask = None
def forward(self, enc_states, enc_len, dec_states):
"""Returns the output of the attention module.
Arguments
---------
enc_states : torch.Tensor
The tensor to be attended.
enc_len : torch.Tensor
The real length (without padding) of enc_states for each sentence.
dec_states : torch.Tensor
The query tensor.
"""
if self.keys is None:
self.keys = self.key_linear(enc_states)
self.values = self.value_linear(enc_states)
self.mask = length_to_mask(
enc_len, max_len=enc_states.size(1), device=enc_states.device
).unsqueeze(2)
query = self.query_linear(dec_states).unsqueeze(2)
scores = torch.matmul(self.keys, query) / self.scaling
scores = scores.masked_fill(self.mask == 0, -np.inf)
normalized_scores = scores.softmax(1).transpose(1, 2)
out = torch.matmul(normalized_scores, self.values).squeeze(1)
return out, normalized_scores
class RelPosEncXL(nn.Module):
"""
"""
def __init__(self, emb_dim):
super().__init__()
self.emb_dim = emb_dim
inv_freq = torch.exp(
torch.arange(0, self.emb_dim, 2, dtype=torch.float32)
* -(math.log(10000.0) / self.emb_dim)
)
self.register_buffer("inv_freq", inv_freq)
def forward(self, x: torch.Tensor):
"""
Parameters
----------
x : torch.Tensor
input tensor with shape batch_size, seq_len, embed_dim
Returns
-------
pos_emb : torch.Tensor
"""
seq_len = x.size(1)
with torch.no_grad():
tot_pe = torch.zeros((2, seq_len, self.emb_dim), dtype=x.dtype).to(
x
)
pe_past = tot_pe[0]
pe_future = tot_pe[1]
positions = (
torch.arange(0, seq_len, dtype=x.dtype, device=x.device)
.to(x)
.unsqueeze(-1)
)
sinusoids = torch.sin(positions * self.inv_freq)
pe_past[:, 0::2] = sinusoids
pe_past[:, 1::2] = torch.cos(positions * self.inv_freq)
pe_future[:, 0::2] = sinusoids # same for past and future
pe_future[:, 1::2] = torch.cos(-positions * self.inv_freq)
pe_past = torch.flip(pe_past, (0,)).unsqueeze(0)
pe_future = pe_future[1:].unsqueeze(0)
pe = torch.cat([pe_past, pe_future], dim=1)
# pe is now 1, 2*seq_len - 1, embed_dim
return pe
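# Illustrative sketch: for an input of length L the encoder above emits one
# embedding per relative offset, past and future, i.e. 2*L - 1 positions.
# That tensor is what RelPosMHAXL (defined below) consumes as `pos_embs`.
# The sizes here are arbitrary.
#
# >>> x = torch.rand([6, 60, 512])
# >>> pos_enc = RelPosEncXL(emb_dim=512)
# >>> pos_embs = pos_enc(x)
# >>> pos_embs.shape
# torch.Size([1, 119, 512])
# >>> mha = RelPosMHAXL(embed_dim=512, num_heads=8)
# >>> out, attn = mha(x, x, x, pos_embs)
# >>> out.shape
# torch.Size([6, 60, 512])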
class RelPosMHAXL(nn.Module):
""" This class implements the relative multihead implementation similar to that in Transformer XL
https://arxiv.org/pdf/1901.02860.pdf
Arguments
---------
embed_dim : int
Size of the encoder feature vectors from which keys and values are computed.
num_heads: int
Number of attention heads.
dropout : float, optional
Dropout rate.
vbias: bool, optional
Whether to use bias for computing value.
vdim: int, optional
Size for value. Default is embed_dim (Note each head is embed_dim // num_heads).
mask_pos_future: bool, optional
Whether to mask future positional encodings values.
Must be true for causal applications e.g. decoder.
Example
-------
>>> inputs = torch.rand([6, 60, 512])
>>> pos_emb = torch.rand([1, 2*60-1, 512])
>>> net = RelPosMHAXL(num_heads=8, embed_dim=inputs.shape[-1])
>>> outputs, attn = net(inputs, inputs, inputs, pos_emb)
>>> outputs.shape
torch.Size([6, 60, 512])
"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
vbias=False,
vdim=None,
mask_pos_future=False,
):
super(RelPosMHAXL, self).__init__()
self.embed_dim = embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.vdim == embed_dim
self.mask_pos_future = mask_pos_future
self.vbias = vbias
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.vhead_dim = self.vdim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
assert (
self.vhead_dim * num_heads == self.vdim
), "vdim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.qk_proj_weight = nn.Parameter(
torch.empty(2 * embed_dim, embed_dim)
)
self.v_proj_weight = nn.Parameter(torch.empty(self.vdim, embed_dim))
else:
self.in_proj_weight = nn.Parameter(
torch.empty(3 * embed_dim, embed_dim)
)
if vbias:
self.value_bias_weight = nn.Parameter(torch.empty(self.vdim))
else:
self.vbias = None
self.dropout_att = nn.Dropout(dropout)
self.out_proj = nn.Linear(self.vdim, embed_dim)
self.linear_pos = nn.Linear(embed_dim, embed_dim, bias=False)
self.pos_bias_u = nn.Parameter(
torch.empty(self.head_dim, self.num_heads)
)
self.pos_bias_v = nn.Parameter(
torch.empty(self.head_dim, self.num_heads)
)
if next(self.parameters()).dtype == torch.float16:
self.attn_fill_value = -65000
else:
self.attn_fill_value = -float("inf")
self._reset_parameters()
self.scale = 1 / math.sqrt(self.embed_dim)
def _reset_parameters(self):
if self._qkv_same_embed_dim:
torch.nn.init.xavier_uniform_(self.in_proj_weight)
else:
torch.nn.init.xavier_uniform_(self.qk_proj_weight)
torch.nn.init.xavier_uniform_(self.v_proj_weight)
if self.vbias is not None:
torch.nn.init.constant_(self.value_bias_weight, 0.0)
# positional biases
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x):
"""Relative shift implementation."""
# batch, head, time1, 2*time1-1.
b, h, qlen, pos_len = x.size() # (b, h, t1, t2)
# need to add a column of zeros on the left side of last dimension to perform the relative shifting
x = torch.nn.functional.pad(x, pad=(1, 0)) # (b, h, t1, t2+1)
x = x.view(b, h, -1, qlen) # (b, h, t2+1, t1)
# need to drop the first row
x = x[:, :, 1:].view(b, h, qlen, pos_len) # (b, h, t1, t2)
if self.mask_pos_future:
ones = torch.ones((x.size(2), x.size(3)), device=x.device)
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x[..., : pos_len // 2 + 1]
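# Illustrative sketch of the shift above: with qlen=2 and pos_len=3, the
# pad / reshape / drop-first-row sequence slides each query row so that
# column j lines up with key j (the Transformer-XL relative-shift trick),
# and the final slice keeps pos_len // 2 + 1 = 2 columns. The values are
# arbitrary and the tiny embed_dim is chosen only to build the module.
#
# >>> mha = RelPosMHAXL(embed_dim=8, num_heads=1)
# >>> scores = torch.arange(6.).view(1, 1, 2, 3)  # [[0, 1, 2], [3, 4, 5]]
# >>> mha.rel_shift(scores).squeeze()
# tensor([[1., 2.],
#         [3., 4.]])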
def forward(
self,
query,
key,
value,
pos_embs,
key_padding_mask=None,
attn_mask=None,
return_attn_weights=True,
):
"""
Arguments
----------
query : tensor
(B, L, E) where L is the target sequence length,
B is the batch size, E is the embedding dimension.
key : tensor
(B, S, E) where S is the source sequence length,
B is the batch size, E is the embedding dimension.
value : tensor
(B, S, E) where S is the source sequence length,
B is the batch size, E is the embedding dimension.
pos_embs : tensor
bidirectional sinusoidal positional embedding tensor (1, 2*S-1, E) where S is the max length between source and target sequence lengths,
and E is the embedding dimension.
key_padding_mask : tensor
(B, S) where B is the batch size, S is the source sequence
length. If a ByteTensor is provided, the non-zero positions will
be ignored while the zero positions will be
unchanged. If a BoolTensor is provided, the positions with the
value of True will be ignored while the positions with the value
of False will be unchanged.
attn_mask : tensor
2D mask (L, S) where L is the target sequence length, S is
the source sequence length.
3D mask (N*num_heads, L, S) where N is the batch
size, L is the target sequence length, S is the source sequence
length. attn_mask ensures that position i is allowed to attend
only the unmasked positions. If a ByteTensor is provided, the
non-zero positions are not allowed to attend while the zero
positions will be unchanged. If a BoolTensor is provided, positions
with True are not allowed to attend while False values will be
unchanged. If a FloatTensor is provided, it will be added to the
attention weight.
Outputs
-------
out : tensor
(B, L, E) where L is the target sequence length, B is the
batch size, E is the embedding dimension.
attn_score : tensor
(B, L, S) where B is the batch size, L is the target
sequence length, S is the source sequence length.
"""
# query, key and value are of shape batch, time, embed_dim
bsz = query.shape[0]
klen = key.shape[1]
qlen = query.shape[1]
if self._qkv_same_embed_dim:
# self-attention
if (query is key or torch.equal(query, key)) and (
key is value or torch.equal(key, value)
):
query, key, value = (
nn.functional.linear(query, self.in_proj_weight)
.view(bsz, -1, self.num_heads, self.head_dim * 3)
.chunk(3, dim=-1)
)
else:
qweight, kweight, vweight = self.in_proj_weight.chunk(3, dim=0)
query = nn.functional.linear(query, qweight).view(
bsz, -1, self.num_heads, self.head_dim
)
key = nn.functional.linear(key, kweight).view(
bsz, -1, self.num_heads, self.head_dim
)
value = nn.functional.linear(value, vweight).view(
bsz, -1, self.num_heads, self.head_dim
)
else:
raise NotImplementedError
query, key = (
nn.functional.linear(query, self.qk_proj_weight)
.view(bsz, -1, self.num_heads, self.head_dim * 2)
.chunk(2, dim=-1)
)
value = nn.functional.linear(value, self.v_proj_weight).view(
bsz, -1, self.num_heads, self.vhead_dim
)
if self.vbias is not None:
value = value + self.value_bias_weight.view(
1, 1, self.num_heads, self.vhead_dim
)
p_k = self.linear_pos(pos_embs).view(
1, -1, self.num_heads, self.head_dim
)
# (batch, head, klen, d_k)
q_with_bias_u = (
query + self.pos_bias_u.view(1, 1, self.num_heads, self.head_dim)
).transpose(1, 2)
# (batch, head, qlen, d_k)
q_with_bias_v = (
query + self.pos_bias_v.view(1, 1, self.num_heads, self.head_dim)
).transpose(1, 2)
# (batch, head, qlen, klen)
matrix_ac = torch.matmul(q_with_bias_u, key.permute(0, 2, 3, 1))
# (batch, num_heads, klen, 2*klen-1)
matrix_bd = torch.matmul(q_with_bias_v, p_k.permute(0, 2, 3, 1))
matrix_bd = self.rel_shift(matrix_bd) # shifting trick
attn_score = (matrix_ac + matrix_bd) * self.scale
# compute attention probability
if attn_mask is not None:
if attn_mask.ndim == 2:
attn_mask = attn_mask.view(1, 1, qlen, klen)
else:
attn_mask = attn_mask.view(-1, self.num_heads, qlen, klen)
if attn_mask.dtype == torch.bool:
attn_score = attn_score.masked_fill(
attn_mask, self.attn_fill_value
)
else:
attn_score += attn_mask
if key_padding_mask is not None:
attn_score = attn_score.masked_fill(
key_padding_mask.view(bsz, 1, 1, klen), self.attn_fill_value,
)
attn_score = F.softmax(attn_score, dim=-1)
attn_score = self.dropout_att(attn_score)
x = torch.matmul(
attn_score, value.transpose(1, 2)
) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2)
.contiguous()
.view(bsz, -1, self.vhead_dim * self.num_heads)
) # (batch, time1, d_model)
out = self.out_proj(x)
if return_attn_weights:
return out, attn_score
return out
class MultiheadAttention(nn.Module):
""" The class is a wrapper of MultiHead Attention for torch.nn.MultiHeadAttention.
Reference: https://pytorch.org/docs/stable/nn.html
Arguments
----------
nhead : int
Number of parallel attention heads.
d_model : int
Total dimension of the model (size of the input embeddings).
dropout : float
a Dropout layer on attn_output_weights (default: 0.0).
bias : bool
add bias as module parameter (default: True).
add_bias_kv : bool
add bias to the key and value sequences at dim=0.
add_zero_attn : bool
add a new batch of zeros to the key and value sequences at dim=1.
kdim : int
total number of features in key (default: None).
vdim : int
total number of features in value (default: None).
Example
-------
>>> inputs = torch.rand([8, 60, 512])
>>> net = MultiheadAttention(nhead=8, d_model=inputs.shape[-1])
>>> outputs, attn = net(inputs, inputs, inputs)
>>> outputs.shape
torch.Size([8, 60, 512])
"""
def __init__(
self,
nhead,
d_model,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
):
super().__init__()
self.att = nn.MultiheadAttention(
embed_dim=d_model,
num_heads=nhead,
dropout=dropout,
bias=bias,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
kdim=kdim,
vdim=vdim,
)
def forward(
self,
query,
key,
value,
attn_mask: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
return_attn_weights: bool = True,
pos_embs: Optional[torch.Tensor] = None,
):
"""
Arguments
----------
query : torch.Tensor
(B, L, E) where L is the target sequence length,
B is the batch size, E is the embedding dimension.
key : torch.Tensor
(B, S, E) where S is the source sequence length,
B is the batch size, E is the embedding dimension.
value : torch.Tensor
(B, S, E) where S is the source sequence length,
B is the batch size, E is the embedding dimension.
key_padding_mask : torch.Tensor, optional
(B, S) where B is the batch size, S is the source sequence
length. If a ByteTensor is provided, the non-zero positions will
be ignored while the zero positions will be
unchanged. If a BoolTensor is provided, the positions with the
value of True will be ignored while the positions with the value
of False will be unchanged.
attn_mask : torch.Tensor, optional
2D mask (L, S) where L is the target sequence length, S is
the source sequence length.
3D mask (N*num_heads, L, S) where N is the batch
size, L is the target sequence length, S is the source sequence
length. attn_mask ensures that position i is allowed to attend
only the unmasked positions. If a ByteTensor is provided, the
non-zero positions are not allowed to attend while the zero
positions will be unchanged. If a BoolTensor is provided, positions
with True are not allowed to attend while False values will be
unchanged. If a FloatTensor is provided, it will be added to the
attention weight.
pos_embs: torch.Tensor, optional
Positional embeddings added to the attention map of shape (L, S, E) or (L, S, 1).
Outputs
-------
attn_output : torch.Tensor
(B, L, E) where L is the target sequence length, B is the
batch size, E is the embedding dimension.
attn_output_weights : torch.Tensor
(B, L, S) where B is the batch size, L is the target
sequence length, S is the source sequence length.
"""
# give tensors of shape (time, batch, fea)
query = query.permute(1, 0, 2)
key = key.permute(1, 0, 2)
value = value.permute(1, 0, 2)
# this works because nn.MultiheadAttention adds a float attn_mask to the attention weights, see https://github.com/pytorch/pytorch/blob/5288d05cfdda85c46c4df84617fa7f37c21b10b3/torch/nn/functional.py#L4946
# we can inject relative learnable pos embeddings directly in MHA via the attn_mask
if pos_embs is not None:
if attn_mask is not None:
attn_mask += pos_embs
else:
attn_mask = pos_embs
output = self.att(
query,
key,
value,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
need_weights=return_attn_weights,
)
if return_attn_weights:
output, attention_weights = output
# reshape the output back to (batch, time, fea)
output = output.permute(1, 0, 2)
return output, attention_weights
else:
output = output.permute(1, 0, 2)
return output
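# Illustrative sketch: padded time steps can be excluded from the attention
# by passing a boolean `key_padding_mask` with True at padded positions.
# Shapes mirror the class doctest; the mask pattern is arbitrary.
#
# >>> inputs = torch.rand([8, 60, 512])
# >>> net = MultiheadAttention(nhead=8, d_model=inputs.shape[-1])
# >>> pad_mask = torch.zeros(8, 60, dtype=torch.bool)
# >>> pad_mask[:, 50:] = True  # last ten steps of every example are padding
# >>> outputs, attn = net(inputs, inputs, inputs, key_padding_mask=pad_mask)
# >>> outputs.shape
# torch.Size([8, 60, 512])
# >>> attn.shape
# torch.Size([8, 60, 60])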
class PositionalwiseFeedForward(nn.Module):
"""The class implements the positional-wise feed forward module in
“Attention Is All You Need”.
Arguments
----------
d_ffn: int
Hidden layer size.
input_shape : tuple, optional
Expected shape of the input. Alternatively use ``input_size``.
input_size : int, optional
Expected size of the input. Alternatively use ``input_shape``.
dropout: float, optional
Dropout rate.
activation: torch.nn.Module, optional
activation functions to be applied (Recommendation: ReLU, GELU).
Example
-------
>>> inputs = torch.rand([8, 60, 512])
>>> net = PositionalwiseFeedForward(256, input_size=inputs.shape[-1])
>>> outputs = net(inputs)
>>> outputs.shape
torch.Size([8, 60, 512])
"""
def __init__(
self,
d_ffn,
input_shape=None,
input_size=None,
dropout=0.0,
activation=nn.ReLU,
):
super().__init__()
if input_shape is None and input_size is None:
raise ValueError("Expected one of input_shape or input_size")
if input_size is None:
input_size = input_shape[-1]
self.ffn = nn.Sequential(
nn.Linear(input_size, d_ffn),
activation(),
nn.Dropout(dropout),
nn.Linear(d_ffn, input_size),
)
def forward(self, x):
"""Applies PositionalwiseFeedForward to the input tensor x."""
# give a tensor of shape (time, batch, fea)
x = x.permute(1, 0, 2)
x = self.ffn(x)
# reshape the output back to (batch, time, fea)
x = x.permute(1, 0, 2)
return x
| 28,426 | 32.841667 | 149 | py |
speechbrain | speechbrain-main/speechbrain/nnet/containers.py | """Library for implementing cascade (sequences) of different neural modules.
Authors
* Peter Plantinga 2020
"""
import torch
import inspect
import logging
import operator
import functools
from speechbrain.nnet.linear import Linear
from speechbrain.utils.callchains import lengths_arg_exists
logger = logging.getLogger(__name__)
class Sequential(torch.nn.ModuleDict):
"""A sequence of modules with potentially inferring shape on construction.
If layers are passed with names, these can be referenced with dot notation.
Arguments
---------
input_shape : iterable
A list or tuple of ints or None, representing the expected shape of an
input tensor. None represents a variable-length dimension. If no
``input_shape`` is passed, no shape inference will be performed.
*layers, **named_layers
The inputs are treated as a list of layers to be
applied in sequence. The output shape of each layer is used to
infer the shape of the following layer. If a tuple is returned,
only the shape of the first element is used to determine input
shape of the next layer (e.g. RNN returns output, hidden).
Example
-------
>>> inputs = torch.rand(10, 40, 50)
>>> model = Sequential(input_shape=inputs.shape)
>>> model.append(Linear, n_neurons=100, layer_name="layer1")
>>> model.append(Linear, n_neurons=200, layer_name="layer2")
>>> outputs = model(inputs)
>>> outputs.shape
torch.Size([10, 40, 200])
>>> outputs = model.layer1(inputs)
>>> outputs.shape
torch.Size([10, 40, 100])
"""
def __init__(self, *layers, input_shape=None, **named_layers):
super().__init__()
# Make sure either layers or input_shape is passed
if not layers and input_shape is None and not named_layers:
raise ValueError("Must pass either layers or input shape")
# Keep track of what layers need "lengths" passed
self.length_layers = []
# Replace None dimensions with arbitrary value
self.input_shape = input_shape
if input_shape and None in input_shape:
self.input_shape = list(input_shape)
for i, dim in enumerate(self.input_shape):
# To reduce size of dummy tensors, use 1 for batch dim
if i == 0 and dim is None:
dim = 1
# Use 256 as a nice round arbitrary value, big enough that
# halving this dimension a few times doesn't reach 1
self.input_shape[i] = dim or 256
# Append non-named layers
for layer in layers:
self.append(layer)
# Append named layers
for name, layer in named_layers.items():
self.append(layer, layer_name=name)
def append(self, layer, *args, layer_name=None, **kwargs):
"""Add a layer to the list of layers, inferring shape if necessary.
Arguments
---------
layer : A torch.nn.Module class or object
If the layer is a class, it should accept an argument called
``input_shape`` which will be inferred and passed. If the layer
is a module object, it is added as-is.
layer_name : str
The name of the layer, for reference. If the name is in use,
``_{count}`` will be appended.
*args, **kwargs
These are passed to the layer if it is constructed.
"""
# Compute layer_name
if layer_name is None:
layer_name = str(len(self))
elif layer_name in self:
index = 0
while f"{layer_name}_{index}" in self:
index += 1
layer_name = f"{layer_name}_{index}"
# Check if it needs to be constructed with input shape
if self.input_shape:
argspec = inspect.getfullargspec(layer)
if "input_shape" in argspec.args + argspec.kwonlyargs:
input_shape = self.get_output_shape()
layer = layer(*args, input_shape=input_shape, **kwargs)
# Finally, append the layer.
try:
self.add_module(layer_name, layer)
except TypeError:
raise ValueError(
"Must pass `input_shape` at initialization and use "
"modules that take `input_shape` to infer shape when "
"using `append()`."
)
def get_output_shape(self):
"""Returns expected shape of the output.
Computed by passing dummy input constructed with the
``self.input_shape`` attribute.
"""
with torch.no_grad():
dummy_input = torch.zeros(self.input_shape)
dummy_output = self(dummy_input)
return dummy_output.shape
def forward(self, x):
"""Applies layers in sequence, passing only the first element of tuples.
Arguments
---------
x : torch.Tensor
The input tensor to run through the network.
"""
for layer in self.values():
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
class LengthsCapableSequential(Sequential):
"""Sequential model that can take ``lengths`` in the forward method.
This is useful for Sequential models that include RNNs where it is
important to avoid padding, or for some feature normalization layers.
Unfortunately, this module is not jit-able because the compiler doesn't
know ahead of time if the length will be passed, and some layers don't
accept the length parameter.
"""
def __init__(self, *args, **kwargs):
self.takes_lengths = []
super().__init__(*args, **kwargs)
def append(self, *args, **kwargs):
"""Add a layer to the list of layers, inferring shape if necessary.
"""
# Add lengths arg inference here.
super().append(*args, **kwargs)
latest_forward_method = list(self.values())[-1].forward
self.takes_lengths.append(lengths_arg_exists(latest_forward_method))
def forward(self, x, lengths=None):
"""Applies layers in sequence, passing only the first element of tuples.
In addition, forward the ``lengths`` argument to all layers that accept
a ``lengths`` argument in their ``forward()`` method (e.g. RNNs).
Arguments
---------
x : torch.Tensor
The input tensor to run through the network.
lengths : torch.Tensor
The relative lengths of each signal in the tensor.
"""
for layer, give_lengths in zip(self.values(), self.takes_lengths):
if give_lengths:
x = layer(x, lengths=lengths)
else:
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
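# Illustrative sketch: the model accepts a `lengths` argument and forwards it
# only to layers whose forward() declares one; layers such as Linear simply
# ignore it. Layer choice and sizes are arbitrary.
#
# >>> inputs = torch.rand(10, 40, 50)
# >>> lengths = torch.ones(10)
# >>> model = LengthsCapableSequential(input_shape=inputs.shape)
# >>> model.append(Linear, n_neurons=100)
# >>> outputs = model(inputs, lengths=lengths)
# >>> outputs.shape
# torch.Size([10, 40, 100])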
class ModuleList(torch.nn.Module):
"""This class implements a wrapper to torch.nn.ModuleList with a forward()
method to forward all the layers sequentially.
For pretrained models saved with SpeechBrain's older implementation of the
Sequential class, users can use this class to load those pretrained models.
Arguments
---------
*layers : torch class
Torch objects to be put in a ModuleList.
"""
def __init__(self, *layers):
super().__init__()
self.layers = torch.nn.ModuleList(layers)
def forward(self, x):
"""Applies the computation pipeline."""
for layer in self.layers:
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
def append(self, module):
"""Appends module to the layers list."""
self.layers.append(module)
def extend(self, modules):
"""Appends the given iterable of modules to the layers list."""
self.layers.extend(modules)
def insert(self, index, module):
"""Inserts the given module at the given index of the layers list."""
self.layers.insert(index, module)
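# Illustrative sketch: the wrapper simply chains the given modules, keeping
# only the first element when a layer returns a tuple. The layers below are
# arbitrary plain torch modules.
#
# >>> layers = ModuleList(torch.nn.Linear(10, 20), torch.nn.ReLU())
# >>> layers(torch.rand(5, 10)).shape
# torch.Size([5, 20])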
class ConnectBlocks(torch.nn.Module):
"""Connect a sequence of blocks with shortcut connections.
Note: all shortcuts start from the output of the first block,
since the first block may change the shape significantly.
Arguments
---------
input_shape : tuple
The expected shape of the input to the first block.
shortcut_type : str
One of:
* "residual" - first block output passed to final output,
* "dense" - input of each block is from all previous blocks,
* "skip" - output of each block is passed to final output.
shortcut_projection : bool
Only has an effect if `shortcut_type` is passed. Whether to add a
linear projection layer to the shortcut connection before combining
with the output, to handle different sizes.
shortcut_combine_fn : str or function
Either a pre-defined function (one of "add", "sub", "mul", "div",
"avg", "cat") or a user-defined function that takes the shortcut
and next input, and combines them, as well as `init_params`
in case parameters need to be initialized inside of the function.
Example
-------
>>> inputs = torch.rand(10, 100, 20)
>>> model = ConnectBlocks(
... input_shape=inputs.shape, shortcut_projection=True
... )
>>> model.append(Linear, n_neurons=10)
>>> model.append(Linear, n_neurons=10, end_of_block=True)
>>> model.append(Linear, n_neurons=10)
>>> model.append(Linear, n_neurons=10, end_of_block=True)
>>> outputs = model(inputs)
>>> outputs.shape
torch.Size([10, 100, 10])
"""
def __init__(
self,
input_shape,
shortcut_type="residual",
shortcut_projection=False,
shortcut_combine_fn=torch.add,
):
super().__init__()
self.first_input_shape = input_shape
self.block_input_shape = input_shape
self.new_block = True
self.blocks = torch.nn.ModuleList()
if shortcut_type not in ["residual", "dense", "skip"]:
raise ValueError(
"'shortcuts' must be one of 'residual', 'dense', or 'skip'"
)
self.shortcut_type = shortcut_type
self.shortcut_projection = shortcut_projection
if shortcut_projection:
self.projections = torch.nn.ModuleList()
self.shortcut_combine_fn = shortcut_combine_fn
def append(self, layer, *args, **kwargs):
"""Appends the specified module to the shortcut model.
Arguments
---------
layer : torch.nn.Module class
This layer will get initialized with *args and **kwargs. Also,
the argument ``input_shape`` will be passed if the layer takes it.
*args, **kwargs
Passed unchanged to the layer **EXCEPT** the kwarg ``end_of_block``
which is used to indicate that the shortcut should be added in.
"""
if self.new_block:
self.blocks.append(Sequential(input_shape=self.block_input_shape))
self.new_block = False
end_of_block = False
if "end_of_block" in kwargs:
end_of_block = kwargs["end_of_block"]
del kwargs["end_of_block"]
self.blocks[-1].append(layer, *args, **kwargs)
# When we reach the end of the block, prepare to add shortcut
if end_of_block:
# Use dummy input to find shape of next block
dummy_input = torch.zeros(self.block_input_shape)
dummy_output = self.blocks[-1](dummy_input)
# Initialize projection if necessary
if self.shortcut_projection:
projection_size = functools.reduce(
operator.mul, dummy_output.shape[2:], 1
)
if self.shortcut_type == "residual":
shape = self.first_input_shape
dummy_input = torch.zeros(self.first_input_shape)
else:
shape = self.block_input_shape
self.projections.append(
Linear(
n_neurons=projection_size,
input_shape=shape,
bias=False,
combine_dims=True,
)
)
# Prepare for next block
self.new_block = True
dummy_output = self._combine(dummy_input, dummy_output, -1)
self.block_input_shape = dummy_output.shape
def forward(self, x):
"""
Arguments
---------
x : torch.Tensor
The inputs to the replicated modules.
"""
shortcut = x
for i, block in enumerate(self.blocks):
x = block(x)
if self.shortcut_type == "skip":
shortcut = self._combine(shortcut, x, i)
if self.shortcut_type == "dense":
x = shortcut = self._combine(shortcut, x, i)
if self.shortcut_type == "residual":
x = self._combine(shortcut, x, i)
if self.shortcut_type == "skip":
return shortcut
else:
return x
def _combine(self, shortcut, x, block_index=0):
"""Handle combining shortcut with outputs."""
# Apply projection
if self.shortcut_projection:
shortcut = self.projections[block_index](shortcut)
shortcut = shortcut.reshape(x.shape)
return self.shortcut_combine_fn(shortcut, x)
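# Illustrative sketch: with shortcut_type="dense" each block receives the
# combination of all previous block outputs. Keeping every layer at the input
# width (20 here) lets the default additive combination work without a
# projection layer; all sizes are arbitrary.
#
# >>> inputs = torch.rand(10, 100, 20)
# >>> model = ConnectBlocks(input_shape=inputs.shape, shortcut_type="dense")
# >>> model.append(Linear, n_neurons=20, end_of_block=True)
# >>> model.append(Linear, n_neurons=20, end_of_block=True)
# >>> outputs = model(inputs)
# >>> outputs.shape
# torch.Size([10, 100, 20])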
| 13,475 | 33.911917 | 80 | py |
speechbrain | speechbrain-main/speechbrain/nnet/complex_networks/c_CNN.py | """Library implementing complex-valued convolutional neural networks.
Authors
* Titouan Parcollet 2020
"""
import torch
import torch.nn as nn
import logging
import torch.nn.functional as F
from speechbrain.nnet.CNN import get_padding_elem
from speechbrain.nnet.complex_networks.c_ops import (
unitary_init,
complex_init,
affect_conv_init,
complex_conv_op,
)
logger = logging.getLogger(__name__)
class CConv1d(torch.nn.Module):
"""This function implements complex-valued 1d convolution.
Arguments
---------
out_channels : int
Number of output channels. Please note
that these are complex-valued neurons. If 256
channels are specified, the output dimension
will be 512.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
"causal" results in causal (dilated) convolutions. (default "same")
padding_mode : str, optional
This flag specifies the type of padding. See torch.nn documentation
for more information (default "reflect").
groups : int, optional
This option specifies the convolutional groups. See torch.nn
documentation for more information (default 1).
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str, optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights. (default "glorot")
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights. "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle. (default "complex")
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> cnn_1d = CConv1d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=5
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 24])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.unsqueeze = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.in_channels = self._check_input(input_shape) // 2
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.real_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.imag_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(2 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = None
self.winit = {"complex": complex_init, "unitary": unitary_init}[
self.weight_init
]
affect_conv_init(
self.real_weight,
self.imag_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor
(batch, time, channel).
Input to convolve. 3d or 4d tensors are expected.
"""
# (batch, channel, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got %s."
% (self.padding)
)
wx = complex_conv_op(
x,
self.real_weight,
self.imag_weight,
self.b,
stride=self.stride,
padding=0,
dilation=self.dilation,
conv1d=True,
)
wx = wx.transpose(1, -1)
return wx
def _manage_padding(self, x, kernel_size, dilation, stride):
"""This function performs zero-padding on the time axis
such that their lengths is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride : int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding = get_padding_elem(L_in, stride, kernel_size, dilation)
# Applying padding
x = F.pad(x, tuple(padding), mode=self.padding_mode)
return x
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 3:
in_channels = input_shape[2]
else:
raise ValueError(
"ComplexConv1d expects 3d inputs. Got " + input_shape
)
# Kernel size must be odd
if self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
# Check complex format
if in_channels % 2 != 0:
raise ValueError(
"Complex Tensors must have dimensions divisible by 2."
" input.size()["
+ str(self.channels_axis)
+ "] = "
+ str(self.nb_channels)
)
return in_channels
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = self.kernel_size
w_shape = (self.out_channels, self.in_channels) + tuple((ks,))
return ks, w_shape
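# Illustrative sketch: channel counts refer to complex-valued units, so the
# 30 real input features are 15 complex channels and out_channels=12 yields
# 24 real output features. The sizes and kernel size are arbitrary.
#
# >>> inp = torch.rand([10, 16, 30])  # 15 complex input channels
# >>> cnn = CConv1d(input_shape=inp.shape, out_channels=12, kernel_size=3)
# >>> cnn(inp).shape  # 12 complex = 24 real output channels
# torch.Size([10, 16, 24])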
class CConv2d(nn.Module):
"""This function implements complex-valued 1d convolution.
Arguments
---------
out_channels : int
Number of output channels. Please note
that these are complex-valued neurons. If 256
channels are specified, the output dimension
will be 512.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
"causal" results in causal (dilated) convolutions. (default "same")
padding_mode : str, optional
This flag specifies the type of padding (default "reflect").
See torch.nn documentation for more information.
groups : int, optional
This option specifies the convolutional groups (default 1). See torch.nn
documentation for more information.
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights (default "glorot").
It is combined with weights_init to build the initialization method of
the complex-valued weights.
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default complex). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30, 30])
>>> cnn_2d = CConv2d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=5
... )
>>> out_tensor = cnn_2d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 30, 24])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.unsqueeze = False
self.init_criterion = init_criterion
self.weight_init = weight_init
# k -> [k,k]
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size, self.kernel_size]
if isinstance(self.dilation, int):
self.dilation = [self.dilation, self.dilation]
if isinstance(self.stride, int):
self.stride = [self.stride, self.stride]
self.in_channels = self._check_input(input_shape) // 2
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.real_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.imag_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(2 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = None
self.winit = {"complex": complex_init, "unitary": unitary_init}[
self.weight_init
]
affect_conv_init(
self.real_weight,
self.imag_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor
(batch, time, feature, channels).
Input to convolve. 3d or 4d tensors are expected.
"""
# (batch, channel, feature, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got %s."
% (self.padding)
)
wx = complex_conv_op(
x,
self.real_weight,
self.imag_weight,
self.b,
stride=self.stride,
padding=0,
dilation=self.dilation,
conv1d=False,
)
wx = wx.transpose(1, -1)
return wx
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = (self.kernel_size[0], self.kernel_size[1])
w_shape = (self.out_channels, self.in_channels) + (*ks,)
return ks, w_shape
def _manage_padding(self, x, kernel_size, dilation, stride):
"""This function performs zero-padding on the time and frequency axes
such that their lengths is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride: int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding_time = get_padding_elem(
L_in, stride[-1], kernel_size[-1], dilation[-1]
)
padding_freq = get_padding_elem(
L_in, stride[-2], kernel_size[-2], dilation[-2]
)
padding = padding_time + padding_freq
# Applying padding
x = nn.functional.pad(x, tuple(padding), mode=self.padding_mode)
return x
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 3:
self.unsqueeze = True
in_channels = 1
elif len(input_shape) == 4:
in_channels = input_shape[3]
else:
raise ValueError("Expected 3d or 4d inputs. Got " + input_shape)
# Kernel size must be odd
if self.kernel_size[0] % 2 == 0 or self.kernel_size[1] % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
# Check complex format
if in_channels % 2 != 0:
raise ValueError(
"Complex Tensors must have dimensions divisible by 2."
" input.size()["
+ str(self.channels_axis)
+ "] = "
+ str(self.nb_channels)
)
return in_channels
| 14,753 | 29.995798 | 95 | py |
speechbrain | speechbrain-main/speechbrain/nnet/complex_networks/c_normalization.py | """Library implementing complex-valued normalization.
Authors
* Titouan Parcollet 2020
"""
import torch
from torch.nn import Parameter
import numpy as np
from speechbrain.nnet.complex_networks.c_ops import multi_mean
class CBatchNorm(torch.nn.Module):
"""This class is implements the complex-valued batch-normalization
as introduced by "Deep Complex Networks", Trabelsi C. et al.
Arguments
---------
input_shape : tuple
Expected shape of the input.
input_size : int
Expected size of the input.
dim : int, optional
It defines the axis that should be normalized. It usually corresponds to
the channel dimension (default -1).
eps : float, optional
Term used to stabilize operation (default 1e-4).
momentum : float, optional
It defines the momentum as for the real-valued batch-normalization
(default 0.1).
scale : bool, optional,
It defines if scaling should be used or not. It is
equivalent to the real-valued batchnormalization scaling (default True).
center : bool, optional
It defines if centering should be used or not. It is
equivalent to the real-valued batchnormalization centering
(default True).
track_running_stats : bool, optional
Equivalent to the real-valued batchnormalization parameter.
When True, stats are tracked. When False, solely statistics computed
over the batch are used (default True).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> CBN = CBatchNorm(input_shape=inp_tensor.shape)
>>> out_tensor = CBN(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 30])
"""
def __init__(
self,
input_shape=None,
input_size=None,
dim=-1,
eps=1e-4,
momentum=0.1,
scale=True,
center=True,
track_running_stats=True,
):
super().__init__()
self.dim = dim
self.eps = eps
self.momentum = momentum
self.scale = scale
self.center = center
self.track_running_stats = track_running_stats
if input_size is None:
self.num_complex_features = self._check_input(input_shape)
else:
self.num_complex_features = input_size // 2
if self.scale:
self.gamma_rr = Parameter(torch.empty(self.num_complex_features))
self.gamma_ii = Parameter(torch.empty(self.num_complex_features))
self.gamma_ri = Parameter(torch.empty(self.num_complex_features))
else:
self.register_parameter("gamma_rr", None)
self.register_parameter("gamma_ii", None)
self.register_parameter("gamma_ri", None)
if self.center:
self.beta = Parameter(torch.empty(self.num_complex_features * 2))
else:
self.register_parameter("beta", None)
if self.track_running_stats:
self.register_buffer(
"num_batches_tracked", torch.tensor(0, dtype=torch.long)
)
if self.scale:
# We initializing the scaling parameter following the proposal
# of "Deep Complex Networks". Trabelsi C. et al.
self.register_buffer(
"moving_Vrr",
torch.ones(self.num_complex_features) * np.sqrt(1 / 2),
)
self.register_buffer(
"moving_Vii",
torch.ones(self.num_complex_features) * np.sqrt(1 / 2),
)
self.register_buffer(
"moving_Vri", torch.zeros(self.num_complex_features)
)
else:
self.register_parameter("moving_Vrr", None)
self.register_parameter("moving_Vii", None)
self.register_parameter("moving_Vri", None)
if self.center:
self.register_buffer(
"moving_mean", torch.zeros(self.num_complex_features * 2)
)
else:
self.register_parameter("moving_mean", None)
else:
self.register_parameter("moving_Vrr", None)
self.register_parameter("moving_Vii", None)
self.register_parameter("moving_Vri", None)
self.register_parameter("moving_mean", None)
self.register_parameter("num_batches_tracked", None)
self.reset_parameters()
def reset_running_stats(self):
"""Simply reset the running statistics to the initial values."""
# "Deep Complex Networks" Trabelsi C. et al.
if self.track_running_stats:
if self.center:
self.moving_mean.zero_()
if self.scale:
self.moving_Vrr.fill_(1 / np.sqrt(2))
self.moving_Vii.fill_(1 / np.sqrt(2))
self.moving_Vri.zero_()
self.num_batches_tracked.zero_()
def reset_parameters(self):
"""Simply reset all the parameters."""
# "Deep Complex Networks" Trabelsi C. et al.
self.reset_running_stats()
if self.scale:
self.gamma_rr.data.fill_(1 / np.sqrt(2))
self.gamma_ii.data.fill_(1 / np.sqrt(2))
self.gamma_ri.data.zero_()
if self.center:
self.beta.data.zero_()
def forward(self, input):
"""Returns the normalized input tensor.
Arguments
---------
input : torch.Tensor (batch, time, [channels])
Input to normalize. It can be 2d, 3d, 4d.
"""
exponential_average_factor = 0.0
# Initialize moving parameters
if self.training and self.track_running_stats:
if self.center:
self.moving_mean = self.moving_mean.detach()
if self.scale:
self.moving_Vrr = self.moving_Vrr.detach()
self.moving_Vii = self.moving_Vii.detach()
self.moving_Vri = self.moving_Vri.detach()
self.num_batches_tracked = self.num_batches_tracked.detach()
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
input_shape = input.size()
ndim = input.dim()
reduction_axes = list(range(ndim))
del reduction_axes[self.dim]
input_dim = input_shape[self.dim] // 2
# Get the mean and center the input
mu = multi_mean(input, reduction_axes, True)
input_centred = input - mu
if self.scale:
centred_squared = input_centred ** 2
# Retrieve the real and image parts of the input tensor w.r.t the
# dimension
if self.scale:
(
centred_squared_real,
centred_squared_imag,
) = self._retrieve_real_imag(centred_squared, ndim, input_dim)
if self.center:
centred_real, centred_imag = self._retrieve_real_imag(
input_centred, ndim, input_dim
)
# We compute the mean for each component
if self.scale:
Vrr = (
multi_mean(
centred_squared_real, axes=reduction_axes, keepdim=True
)
+ self.eps
)
Vii = (
multi_mean(
centred_squared_imag, axes=reduction_axes, keepdim=True
)
+ self.eps
)
# Vri contains the real and imaginary covariance
# for each feature map.
Vri = multi_mean(
centred_real * centred_imag, axes=reduction_axes, keepdim=True
)
else:
Vrr = None
Vii = None
Vri = None
# Pick the normalized form corresponding
# to the training phase when we use running stats.
if self.training and self.track_running_stats:
if self.center:
self.moving_mean = (
1 - exponential_average_factor
) * self.moving_mean + exponential_average_factor * mu.view(
self.moving_mean.size()
)
if self.scale:
self.moving_Vrr = (
1 - exponential_average_factor
) * self.moving_Vrr + exponential_average_factor * Vrr.view(
self.moving_Vrr.size()
)
self.moving_Vii = (
1 - exponential_average_factor
) * self.moving_Vii + exponential_average_factor * Vii.view(
self.moving_Vii.size()
)
self.moving_Vri = (
1 - exponential_average_factor
) * self.moving_Vri + exponential_average_factor * Vri.view(
self.moving_Vri.size()
)
if self.training or (not self.track_running_stats):
input_inferred = input_centred if self.center else input
return c_norm(
input_inferred,
Vrr,
Vii,
Vri,
self.beta,
self.gamma_rr,
self.gamma_ri,
self.gamma_ii,
self.scale,
self.center,
layernorm=False,
dim=self.dim,
)
else: # if we are not training or using running_stats
if self.center:
input_inferred = input - self.moving_mean.view(mu.size())
else:
input_inferred = input
return c_norm(
input_inferred,
self.moving_Vrr,
self.moving_Vii,
self.moving_Vri,
self.beta,
self.gamma_rr,
self.gamma_ri,
self.gamma_ii,
self.scale,
self.center,
layernorm=False,
dim=self.dim,
)
def _retrieve_real_imag(self, tensor, ndim, input_dim):
"""
Function used to retrieve the real and imaginary component of a tensor
according to the dimensions
"""
if self.dim == 1 or ndim == 2:
tensor_real = tensor[:, :input_dim]
tensor_imag = tensor[:, input_dim:]
elif self.dim == -1 and ndim == 3:
tensor_real = tensor[:, :, :input_dim]
tensor_imag = tensor[:, :, input_dim:]
elif self.dim == -1 and ndim == 4:
tensor_real = tensor[:, :, :, :input_dim]
tensor_imag = tensor[:, :, :, input_dim:]
else:
msg = "Retrieve_real_imag expects 2d to 4d inputs. Got " + str(
len(tensor)
)
raise ValueError(msg)
return tensor_real, tensor_imag
def _check_input(self, input_shape):
"""
Checks the input and returns the number of complex values.
"""
if input_shape[self.dim] % 2 == 0:
return input_shape[self.dim] // 2
else:
msg = "ComplexBatchNorm dim must be divisible by 2 ! Got " + str(
input_shape[self.dim]
)
raise ValueError(msg)
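# Illustrative sketch: with track_running_stats=True the complex statistics
# are updated only in training mode; after eval() the stored moving estimates
# are used instead. Sizes are arbitrary.
#
# >>> inp = torch.rand([10, 16, 30])
# >>> cbn = CBatchNorm(input_shape=inp.shape)
# >>> _ = cbn(inp)  # training step: updates the moving statistics
# >>> int(cbn.num_batches_tracked)
# 1
# >>> cbn = cbn.eval()  # inference: moving statistics are frozen and reused
# >>> cbn(inp).shape
# torch.Size([10, 16, 30])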
class CLayerNorm(torch.nn.Module):
"""This class is used to instantiate the complex
layer-normalization as introduced by "Deep Complex Networks",
Trabelsi C. et al.
Arguments
---------
input_shape : tuple
Expected shape of the input.
input_size : int
Expected size of the input dimension.
dim : int, optional
It defines the axis that should be normalized. It usually corresponds to
the channel dimension (default -1).
eps : float, optional
Term used to stabilize operation (default 1e-4).
scale : bool, optional,
It defines if scaling should be used or not. It is
equivalent to the real-valued batchnormalization scaling (default True).
center : bool, optional
It defines if centering should be used or not. It is
equivalent to the real-valued batchnormalization centering
(default True).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> CBN = CLayerNorm(input_shape=inp_tensor.shape)
>>> out_tensor = CBN(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 30])
"""
def __init__(
self,
input_shape=None,
input_size=None,
dim=-1,
eps=1e-4,
scale=True,
center=True,
):
super().__init__()
self.dim = dim
self.eps = eps
self.scale = scale
self.center = center
if input_size is None:
self.num_complex_features = self._check_input(input_shape)
else:
self.num_complex_features = input_size // 2
if self.scale:
self.gamma_rr = Parameter(torch.empty(self.num_complex_features))
self.gamma_ii = Parameter(torch.empty(self.num_complex_features))
self.gamma_ri = Parameter(torch.empty(self.num_complex_features))
else:
self.register_parameter("gamma_rr", None)
self.register_parameter("gamma_ii", None)
self.register_parameter("gamma_ri", None)
if self.center:
self.beta = Parameter(torch.empty(self.num_complex_features * 2))
else:
self.register_parameter("beta", None)
self.reset_parameters()
def reset_parameters(self):
"""Simply reset all the parameters."""
# "Deep Complex Networks" Trabelsi C. et al.
if self.scale:
self.gamma_rr.data.fill_(1 / np.sqrt(2))
self.gamma_ii.data.fill_(1 / np.sqrt(2))
self.gamma_ri.data.zero_()
if self.center:
self.beta.data.zero_()
def forward(self, input):
"""Computes the complex normalization."""
input_shape = input.size()
ndim = input.dim()
reduction_axes = list(range(ndim))
del reduction_axes[self.dim]
del reduction_axes[0]
input_dim = input_shape[self.dim] // 2
# Get the mean and center
mu = multi_mean(input, reduction_axes, True)
if self.center:
input_centred = input - mu
else:
input_centred = input
centred_squared = input_centred ** 2
if self.dim == 1 or ndim == 2:
centred_squared_real = centred_squared[:, :input_dim]
centred_squared_imag = centred_squared[:, input_dim:]
centred_real = input_centred[:, :input_dim]
centred_imag = input_centred[:, input_dim:]
elif self.dim == -1 and ndim == 3:
centred_squared_real = centred_squared[:, :, :input_dim]
centred_squared_imag = centred_squared[:, :, input_dim:]
centred_real = input_centred[:, :, :input_dim]
centred_imag = input_centred[:, :, input_dim:]
elif self.dim == -1 and ndim == 4:
centred_squared_real = centred_squared[:, :, :, :input_dim]
centred_squared_imag = centred_squared[:, :, :, input_dim:]
centred_real = input_centred[:, :, :, :input_dim]
centred_imag = input_centred[:, :, :, input_dim:]
else:
centred_squared_real = centred_squared[:, :, :, :, :input_dim]
centred_squared_imag = centred_squared[:, :, :, :, input_dim:]
centred_real = input_centred[:, :, :, :, :input_dim]
centred_imag = input_centred[:, :, :, :, input_dim:]
if self.scale:
Vrr = (
multi_mean(
centred_squared_real, axes=reduction_axes, keepdim=True
)
+ self.eps
)
Vii = (
multi_mean(
centred_squared_imag, axes=reduction_axes, keepdim=True
)
+ self.eps
)
Vri = multi_mean(
centred_real * centred_imag, axes=reduction_axes, keepdim=True
)
else:
Vrr = None
Vii = None
Vri = None
return c_norm(
input_centred,
Vrr,
Vii,
Vri,
self.beta,
self.gamma_rr,
self.gamma_ri,
self.gamma_ii,
self.scale,
self.center,
dim=self.dim,
layernorm=True,
)
def _check_input(self, input_shape):
"""Checks the input and returns the number of complex values.
"""
if input_shape[self.dim] % 2 == 0:
return input_shape[self.dim] // 2
else:
msg = "ComplexBatchNorm dim must be dividble by 2 ! Got " + str(
input_shape[self.dim]
)
raise ValueError(msg)
def c_norm(
input_centred,
Vrr,
Vii,
Vri,
beta,
gamma_rr,
gamma_ri,
gamma_ii,
scale=True,
center=True,
layernorm=False,
dim=-1,
):
"""This function is used to apply the complex normalization
as introduced by "Deep Complex Networks", Trabelsi C. et al.
Arguments
---------
input_centred : torch.Tensor
It is the tensor to be normalized. The features
dimension is divided by 2 with the first half
corresponding to the real-parts and the second half
to the imaginary parts.
Vrr : torch.Tensor
It is a tensor that contains the covariance between real-parts.
Vii : torch.Tensor
It is a tensor that contains the covariance between imaginary-parts.
Vri : torch.Tensor
It is a tensor that contains the covariance between real-parts and
imaginary-parts.
beta : torch.Tensor
It is a tensor corresponding to the beta parameter on the real-valued
batch-normalization, but in the complex-valued space.
gamma_rr : torch.Tensor
It is a tensor that contains the gamma between real-parts.
gamma_ii : torch.Tensor
It is a tensor that contains the gamma between imaginary-parts.
gamma_ri : torch.Tensor
It is a tensor that contains the gamma between real-parts and
imaginary-parts.
scale : bool, optional
It defines if scaling should be used or not. It is
equivalent to the real-valued batchnormalization
scaling (default True).
center : bool, optional,
It defines if centering should be used or not. It is
equivalent to the real-valued batchnormalization centering
(default True).
layernorm : bool, optional
It defines whether c_standardization is called from a layernorm or a
batchnorm layer (default False).
dim : int, optional
It defines the axis that should be considered as the complex-valued
axis (divided by 2 to get r and i) (default -1).
"""
ndim = input_centred.dim()
input_dim = input_centred.size(dim) // 2
if scale:
gamma_broadcast_shape = [1] * ndim
gamma_broadcast_shape[dim] = input_dim
if center:
broadcast_beta_shape = [1] * ndim
broadcast_beta_shape[dim] = input_dim * 2
if scale:
standardized_output = c_standardization(
input_centred, Vrr, Vii, Vri, layernorm, dim=dim
)
# Now we perform the scaling and Shifting of the normalized x using
# the scaling parameter
# [ gamma_rr gamma_ri ]
# Gamma = [ gamma_ri gamma_ii ]
# and the shifting parameter
# Beta = [beta_real beta_imag].T
# where:
# x_real_BN = gamma_rr * x_real_normed +
# gamma_ri * x_imag_normed + beta_real
# x_imag_BN = gamma_ri * x_real_normed +
# gamma_ii * x_imag_normed + beta_imag
broadcast_gamma_rr = gamma_rr.view(gamma_broadcast_shape)
broadcast_gamma_ri = gamma_ri.view(gamma_broadcast_shape)
broadcast_gamma_ii = gamma_ii.view(gamma_broadcast_shape)
cat_gamma_4_real = torch.cat(
[broadcast_gamma_rr, broadcast_gamma_ii], dim=dim
)
cat_gamma_4_imag = torch.cat(
[broadcast_gamma_ri, broadcast_gamma_ri], dim=dim
)
if dim == 0:
centred_real = standardized_output[:input_dim]
centred_imag = standardized_output[input_dim:]
elif dim == 1 or (dim == -1 and ndim == 2):
centred_real = standardized_output[:, :input_dim]
centred_imag = standardized_output[:, input_dim:]
elif dim == -1 and ndim == 3:
centred_real = standardized_output[:, :, :input_dim]
centred_imag = standardized_output[:, :, input_dim:]
elif dim == -1 and ndim == 4:
centred_real = standardized_output[:, :, :, :input_dim]
centred_imag = standardized_output[:, :, :, input_dim:]
else:
centred_real = standardized_output[:, :, :, :, :input_dim]
centred_imag = standardized_output[:, :, :, :, input_dim:]
rolled_standardized_output = torch.cat(
[centred_imag, centred_real], dim=dim
)
if center:
broadcast_beta = beta.view(broadcast_beta_shape)
a = cat_gamma_4_real * standardized_output
b = cat_gamma_4_imag * rolled_standardized_output
return a + b + broadcast_beta
else:
return (
cat_gamma_4_real * standardized_output
+ cat_gamma_4_imag * rolled_standardized_output
)
else:
if center:
broadcast_beta = beta.view(broadcast_beta_shape)
return input_centred + broadcast_beta
else:
return input_centred
def c_standardization(input_centred, Vrr, Vii, Vri, layernorm=False, dim=-1):
"""This function is used to standardize a centred tensor of
complex numbers (mean of the set must be 0).
Arguments
---------
input_centred : torch.Tensor
It is the tensor to be normalized. The features
dimension is divided by 2 with the first half
corresponding to the real-parts and the second half
to the imaginary parts.
Vrr : torch.Tensor
It is a tensor that contains the covariance between real-parts.
Vii : torch.Tensor
It is a tensor that contains the covariance between imaginary-parts.
Vri : torch.Tensor
It is a tensor that contains the covariance between real-parts and
imaginary-parts.
layernorm : bool, optional
It defines whether c_standardization is called from a layernorm or a
batchnorm layer (default False).
dim : int, optional
It defines the axis that should be considered as the complex-valued
axis (divided by 2 to get r and i) (default -1).
"""
ndim = input_centred.dim()
input_dim = input_centred.size(dim) // 2
variances_broadcast = [1] * ndim
variances_broadcast[dim] = input_dim
if layernorm:
variances_broadcast[0] = input_centred.size(0)
# We require the covariance matrix's inverse square root. That requires
# square rooting, followed by inversion (During the computation of square
# root we compute the determinant we'll need for inversion as well).
# tau = Vrr + Vii = Trace. Guaranteed >=0 because Positive-definite matrix
tau = Vrr + Vii
# delta = (Vrr * Vii) - (Vri ** 2) = Determinant
delta = (Vrr * Vii) - (Vri ** 2)
s = delta.sqrt()
t = (tau + 2 * s).sqrt()
# The square root matrix could now be explicitly formed as
# [ Vrr+s Vri ]
# (1/t) [ Vir Vii+s ]
# https://en.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix
# but we don't need to do this immediately since we can also simultaneously
# invert. We can do this because we've already computed the determinant of
# the square root matrix, and can thus invert it using the analytical
# solution for 2x2 matrices
# [ A B ] [ D -B ]
# inv( [ C D ] ) = (1/det) [ -C A ]
# http://mathworld.wolfram.com/MatrixInverse.html
# Thus giving us
# [ Vii+s -Vri ]
# (1/s)(1/t)[ -Vir Vrr+s ]
# So we proceed as follows:
inverse_st = 1.0 / (s * t)
Wrr = (Vii + s) * inverse_st
Wii = (Vrr + s) * inverse_st
Wri = -Vri * inverse_st
# And we have computed the inverse square root matrix W = sqrt(V)!
# Normalization. We multiply, x_normalized = W.x.
# The returned result will be a complex standardized input
# where the real and imaginary parts are obtained as follows:
# x_real_normed = Wrr * x_real_centred + Wri * x_imag_centred
# x_imag_normed = Wri * x_real_centred + Wii * x_imag_centred
broadcast_Wrr = Wrr.view(variances_broadcast)
broadcast_Wri = Wri.view(variances_broadcast)
broadcast_Wii = Wii.view(variances_broadcast)
cat_W_4_real = torch.cat([broadcast_Wrr, broadcast_Wii], dim=dim)
cat_W_4_imag = torch.cat([broadcast_Wri, broadcast_Wri], dim=dim)
if dim == 0:
centred_real = input_centred[:input_dim]
centred_imag = input_centred[input_dim:]
elif dim == 1 or (dim == -1 and ndim == 2):
centred_real = input_centred[:, :input_dim]
centred_imag = input_centred[:, input_dim:]
elif dim == -1 and ndim == 3:
centred_real = input_centred[:, :, :input_dim]
centred_imag = input_centred[:, :, input_dim:]
elif dim == -1 and ndim == 4:
centred_real = input_centred[:, :, :, :input_dim]
centred_imag = input_centred[:, :, :, input_dim:]
else:
centred_real = input_centred[:, :, :, :, :input_dim]
centred_imag = input_centred[:, :, :, :, input_dim:]
rolled_input = torch.cat([centred_imag, centred_real], dim=dim)
output = cat_W_4_real * input_centred + cat_W_4_imag * rolled_input
# Wrr * x_real_centered | Wii * x_imag_centered
# + Wri * x_imag_centered | Wri * x_real_centered
# -----------------------------------------------
# = output
return output
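# --- Illustrative sketch (not part of the original module) ---
# The comments in c_standardization derive the analytic inverse square root of
# the 2x2 covariance matrix [[Vrr, Vri], [Vri, Vii]]. The hypothetical helper
# below checks that formula numerically on a toy covariance; it only assumes
# that `torch` is imported at the top of this module (it is used throughout).
def _demo_inverse_sqrt_2x2():
    """Checks that W = (1/(s*t)) [[Vii+s, -Vri], [-Vri, Vrr+s]] satisfies
    W @ V @ W == I for V = [[Vrr, Vri], [Vri, Vii]]."""
    Vrr, Vii, Vri = 2.0, 1.5, 0.5
    tau = Vrr + Vii  # trace
    delta = Vrr * Vii - Vri ** 2  # determinant
    s = delta ** 0.5
    t = (tau + 2 * s) ** 0.5
    inverse_st = 1.0 / (s * t)
    W = torch.tensor(
        [
            [(Vii + s) * inverse_st, -Vri * inverse_st],
            [-Vri * inverse_st, (Vrr + s) * inverse_st],
        ]
    )
    V = torch.tensor([[Vrr, Vri], [Vri, Vii]])
    # W is the inverse square root of V, so whitening V with it gives identity.
    assert torch.allclose(W @ V @ W, torch.eye(2), atol=1e-5)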
| 26,368 | 34.730352 | 80 | py |
speechbrain | speechbrain-main/speechbrain/nnet/complex_networks/c_ops.py | """This library implements different operations needed by complex-
valued architectures.
This work is inspired by: "Deep Complex Networks" from Trabelsi C.
et al.
Authors
* Titouan Parcollet 2020
"""
import torch
import torch.nn.functional as F
import numpy as np
def check_complex_input(input_shape):
"""Check the complex-valued shape for a linear layer.
Arguments
---------
input_shape : tuple
Expected shape of the input.
"""
if len(input_shape) not in {2, 3}:
raise Exception(
"Complex linear accepts only input of dimension 2 or 3."
" input.dim = " + str(input.dim())
)
nb_hidden = input_shape[-1]
    if nb_hidden % 2 != 0:
        raise Exception(
            "Complex Tensors must have an even number of hidden dimensions."
            " input_shape[-1] = " + str(nb_hidden)
)
def get_real(input, input_type="linear", channels_axis=1):
"""Returns the real components of the complex-valued input.
Arguments
---------
input : torch.Tensor
Input tensor.
input_type : str,
(convolution, linear) (default "linear")
channels_axis : int.
Default 1.
"""
if input_type == "linear":
nb_hidden = input.size()[-1]
if input.dim() == 2:
return input.narrow(
1, 0, nb_hidden // 2
) # input[:, :nb_hidden / 2]
elif input.dim() == 3:
return input.narrow(
2, 0, nb_hidden // 2
) # input[:, :, :nb_hidden / 2]
else:
nb_featmaps = input.size(channels_axis)
return input.narrow(channels_axis, 0, nb_featmaps // 2)
def get_imag(input, input_type="linear", channels_axis=1):
"""Returns the imaginary components of the complex-valued input.
Arguments
---------
input : torch.Tensor
Input tensor.
input_type : str,
(convolution, linear) (default "linear")
channels_axis : int.
Default 1.
"""
if input_type == "linear":
nb_hidden = input.size()[-1]
if input.dim() == 2:
return input.narrow(
1, nb_hidden // 2, nb_hidden // 2
) # input[:, :nb_hidden / 2]
elif input.dim() == 3:
return input.narrow(
2, nb_hidden // 2, nb_hidden // 2
) # input[:, :, :nb_hidden / 2]
else:
nb_featmaps = input.size(channels_axis)
return input.narrow(channels_axis, nb_featmaps // 2, nb_featmaps // 2)
def get_conjugate(input, input_type="linear", channels_axis=1):
"""Returns the conjugate (z = r - xi) of the input complex numbers.
Arguments
---------
input : torch.Tensor
Input tensor
input_type : str,
(convolution, linear) (default "linear")
channels_axis : int.
Default 1.
"""
input_imag = get_imag(input, input_type, channels_axis)
input_real = get_real(input, input_type, channels_axis)
if input_type == "linear":
return torch.cat([input_real, -input_imag], dim=-1)
elif input_type == "convolution":
return torch.cat([input_real, -input_imag], dim=channels_axis)
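# Illustrative usage sketch (hypothetical, not part of the original API): the
# helpers above assume the [real | imaginary] layout along the feature axis.
def _demo_real_imag_helpers():
    x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])  # real = [1, 2], imag = [3, 4]
    assert torch.equal(get_real(x), torch.tensor([[1.0, 2.0]]))
    assert torch.equal(get_imag(x), torch.tensor([[3.0, 4.0]]))
    # The conjugate keeps the real part and negates the imaginary part.
    assert torch.equal(get_conjugate(x), torch.tensor([[1.0, 2.0, -3.0, -4.0]]))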
def complex_linear_op(input, real_weight, imag_weight, bias):
"""
Applies a complex linear transformation to the incoming data.
Arguments
---------
input : torch.Tensor
Complex input tensor to be transformed.
real_weight : torch.Parameter
        Real part of the complex weight matrix of this layer.
    imag_weight : torch.Parameter
        Imaginary part of the complex weight matrix of this layer.
bias : torch.Parameter
"""
cat_real = torch.cat([real_weight, -imag_weight], dim=0)
cat_imag = torch.cat([imag_weight, real_weight], dim=0)
cat_complex = torch.cat([cat_real, cat_imag], dim=1)
# If the input is already [batch*time, N]
if input.dim() == 2:
if bias.requires_grad:
return torch.addmm(bias, input, cat_complex)
else:
return torch.mm(input, cat_complex)
else:
output = torch.matmul(input, cat_complex)
if bias.requires_grad:
return output + bias
else:
return output
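# Illustrative sketch (hypothetical names, not part of the original API):
# complex_linear_op packs the real/imaginary weights into one real matrix
# [[Wr, Wi], [-Wi, Wr]] so that a single real matmul realizes the complex
# product. The check below compares it against the native complex matmul
# available in recent PyTorch versions.
def _demo_complex_linear_op():
    batch, in_f, out_f = 3, 4, 5  # sizes in terms of complex values
    real_w = torch.randn(in_f, out_f)
    imag_w = torch.randn(in_f, out_f)
    bias = torch.zeros(2 * out_f)  # no bias contribution in this sketch
    x = torch.randn(batch, 2 * in_f)  # [real parts | imaginary parts]
    out = complex_linear_op(x, real_w, imag_w, bias)
    # Reference: (x_r + i x_i) @ (W_r + i W_i) using torch's complex dtype.
    ref = torch.complex(x[:, :in_f], x[:, in_f:]) @ torch.complex(real_w, imag_w)
    assert torch.allclose(out[:, :out_f], ref.real, atol=1e-5)
    assert torch.allclose(out[:, out_f:], ref.imag, atol=1e-5)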
def complex_conv_op(
input, real_weight, imag_weight, bias, stride, padding, dilation, conv1d
):
"""Applies a complex convolution to the incoming data.
Arguments
---------
input : torch.Tensor
Complex input tensor to be transformed.
conv1d : bool
If true, a 1D convolution operation will be applied. Otherwise, a 2D
convolution is called.
real_weight : torch.Parameter
        Real part of the complex weight matrix of this layer.
    imag_weight : torch.Parameter
        Imaginary part of the complex weight matrix of this layer.
bias : torch.Parameter
stride : int
Stride factor of the convolutional filters.
padding : int
Amount of padding. See torch.nn documentation for more information.
dilation : int
Dilation factor of the convolutional filters.
"""
cat_real = torch.cat([real_weight, -imag_weight], dim=1)
cat_imag = torch.cat([imag_weight, real_weight], dim=1)
cat_complex = torch.cat([cat_real, cat_imag], dim=0)
if conv1d:
convfunc = F.conv1d
else:
convfunc = F.conv2d
return convfunc(input, cat_complex, bias, stride, padding, dilation)
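# Illustrative shape check (hypothetical, not part of the original API): with 2
# complex input channels and 3 complex output channels, the packed real kernel
# has shape (6, 4, k) and a (batch, 4, time) input gives a (batch, 6, time) map.
def _demo_complex_conv_op():
    real_w = torch.randn(3, 2, 3)  # (out, in, kernel) in complex terms
    imag_w = torch.randn(3, 2, 3)
    bias = torch.zeros(6)
    x = torch.randn(8, 4, 100)  # 2 real + 2 imaginary channels
    out = complex_conv_op(
        x, real_w, imag_w, bias, stride=1, padding=1, dilation=1, conv1d=True
    )
    assert out.shape == (8, 6, 100)  # padding=1 keeps the time length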
def unitary_init(
in_features, out_features, kernel_size=None, criterion="glorot"
):
""" Returns a matrice of unitary complex numbers.
Arguments
---------
in_features : int
Number of real values of the input layer (quaternion // 4).
out_features : int
Number of real values of the output layer (quaternion // 4).
kernel_size : int
Kernel_size for convolutional layers (ex: (3,3)).
criterion : str
(glorot, he) (default "glorot").
"""
if kernel_size is None:
kernel_shape = (in_features, out_features)
else:
if type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
number_of_weights = np.prod(kernel_shape)
v_r = np.random.uniform(-1.0, 1.0, number_of_weights)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
# Unitary complex
for i in range(0, number_of_weights):
norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2) + 0.0001
v_r[i] /= norm
v_i[i] /= norm
v_r = v_r.reshape(kernel_shape)
v_i = v_i.reshape(kernel_shape)
return (v_r, v_i)
def complex_init(
in_features, out_features, kernel_size=None, criterion="glorot"
):
""" Returns a matrice of complex numbers initialized as described in:
"Deep Complex Networks", Trabelsi C. et al.
Arguments
---------
in_features : int
Number of real values of the input layer (quaternion // 4).
out_features : int
Number of real values of the output layer (quaternion // 4).
kernel_size : int
Kernel_size for convolutional layers (ex: (3,3)).
criterion: str
(glorot, he) (default "glorot")
"""
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
fan_out = out_features * receptive_field
fan_in = in_features * receptive_field
else:
fan_out = out_features
fan_in = in_features
if criterion == "glorot":
s = 1.0 / (fan_in + fan_out)
else:
s = 1.0 / fan_in
if kernel_size is None:
size = (in_features, out_features)
else:
if type(kernel_size) is int:
size = (out_features, in_features) + tuple((kernel_size,))
else:
size = (out_features, in_features) + (*kernel_size,)
modulus = np.random.rayleigh(scale=s, size=size)
phase = np.random.uniform(-np.pi, np.pi, size)
weight_real = modulus * np.cos(phase)
weight_imag = modulus * np.sin(phase)
return (weight_real, weight_imag)
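# Illustrative sketch (hypothetical, not part of the original API): quick check
# of the two initializers above. unitary_init draws (nearly) unit-modulus
# complex weights, while complex_init draws a Rayleigh-distributed modulus and
# a uniform phase, as described in "Deep Complex Networks".
def _demo_complex_inits():
    v_r, v_i = unitary_init(64, 32)
    modulus = np.sqrt(v_r ** 2 + v_i ** 2)
    # The small epsilon in the normalization keeps the modulus just below 1.
    assert np.all(modulus <= 1.0) and modulus.mean() > 0.99
    w_r, w_i = complex_init(64, 32, criterion="glorot")
    assert w_r.shape == (64, 32) and w_i.shape == (64, 32)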
def affect_init(real_weight, imag_weight, init_func, criterion):
""" Applies the weight initialization function given to the parameters.
Arguments
---------
real_weight: torch.Parameters
imag_weight: torch.Parameters
init_func: function
(unitary_init, complex_init)
criterion: str
(glorot, he)
"""
a, b = init_func(real_weight.size(0), real_weight.size(1), None, criterion)
a, b = torch.from_numpy(a), torch.from_numpy(b)
real_weight.data = a.type_as(real_weight.data)
imag_weight.data = b.type_as(imag_weight.data)
def affect_conv_init(
real_weight, imag_weight, kernel_size, init_func, criterion
):
""" Applies the weight initialization function given to the parameters.
This is specifically written for convolutional layers.
Arguments
---------
real_weight: torch.Parameters
imag_weight: torch.Parameters
kernel_size: int
init_func: function
(unitary_init, complex_init)
criterion: str
(glorot, he)
"""
in_channels = real_weight.size(1)
out_channels = real_weight.size(0)
a, b = init_func(
in_channels, out_channels, kernel_size=kernel_size, criterion=criterion,
)
a, b = torch.from_numpy(a), torch.from_numpy(b)
real_weight.data = a.type_as(real_weight.data)
imag_weight.data = b.type_as(imag_weight.data)
# The following mean function using a list of reduced axes is taken from:
# https://discuss.pytorch.org/t/sum-mul-over-multiple-axes/1882/8
def multi_mean(input, axes, keepdim=False):
"""
Performs `torch.mean` over multiple dimensions of `input`.
"""
axes = sorted(axes)
m = input
for axis in reversed(axes):
m = m.mean(axis, keepdim)
return m
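# Hypothetical quick check (not part of the original API): multi_mean reduces
# the requested axes one after another, matching torch.mean over a dim tuple.
def _demo_multi_mean():
    x = torch.randn(4, 5, 6)
    assert torch.allclose(multi_mean(x, [0, 2]), x.mean(dim=(0, 2)))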
| 9,840 | 29.28 | 80 | py |
speechbrain | speechbrain-main/speechbrain/nnet/complex_networks/c_RNN.py | """Library implementing complex-valued recurrent neural networks.
Authors
* Titouan Parcollet 2020
"""
import torch
import logging
from speechbrain.nnet.complex_networks.c_linear import CLinear
from speechbrain.nnet.complex_networks.c_normalization import (
CBatchNorm,
CLayerNorm,
)
logger = logging.getLogger(__name__)
class CLSTM(torch.nn.Module):
""" This function implements a complex-valued LSTM.
Input format is (batch, time, fea) or (batch, time, fea, channel).
In the latter shape, the two last dimensions will be merged:
(batch, time, fea * channel)
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
        Specified value is in terms of complex-valued neurons. Thus, the output
is 2*hidden_size.
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
bias: bool, optional
If True, the additive bias b is adopted (default True).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
return_hidden : bool, optional
        If True, the function returns the last hidden layer.
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 40])
>>> rnn = CLSTM(hidden_size=16, input_shape=inp_tensor.shape)
>>> out_tensor = rnn(inp_tensor)
    >>> out_tensor.shape
torch.Size([10, 16, 32])
"""
def __init__(
self,
hidden_size,
input_shape,
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
return_hidden=False,
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.hidden_size = hidden_size * 2
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.return_hidden = return_hidden
self.init_criterion = init_criterion
self.weight_init = weight_init
if len(input_shape) > 3:
self.reshape = True
# Computing the feature dimensionality
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
def _init_layers(self,):
"""
Initializes the layers of the ComplexLSTM.
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = CLSTM_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx=None):
"""Returns the output of the CLSTM.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
output, hh = self._forward_rnn(x, hx=hx)
if self.return_hidden:
return output, hh
else:
return output
def _forward_rnn(self, x, hx):
"""Returns the output of the CLSTM.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, rnn_lay in enumerate(self.rnn):
if hx is not None:
x = rnn_lay(x, hx=hx[i])
else:
x = rnn_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
class CLSTM_Layer(torch.nn.Module):
""" This function implements complex-valued LSTM layer.
Arguments
---------
input_size : int
        Feature dimensionality of the input tensors (in terms of real values).
    batch_size : int
        Batch size of the input tensors.
    hidden_size : int
        Number of output values (in terms of real values).
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str, optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
bidirectional=False,
init_criterion="glorot",
weight_init="complex",
):
super(CLSTM_Layer, self).__init__()
        self.hidden_size = int(hidden_size) // 2  # Expressed in terms of complex numbers
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.w = CLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size * 4, # Forget, Input, Output, Cell
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
self.u = CLinear(
input_shape=self.hidden_size * 2, # The input size is in real
n_neurons=self.hidden_size * 4,
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 2))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
def forward(self, x, hx=None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the CRNN_layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
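            # The time-reversed copy is stacked along the batch axis so both
            # directions share the same recurrent cell; the two halves are
            # split and merged back along the feature axis after the loop.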
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Processing time steps
if hx is not None:
h = self._complexlstm_cell(w, hx)
else:
h = self._complexlstm_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _complexlstm_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
wx : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Initialise the cell state
ct = self.h_init
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
gates = w[:, k] + self.u(ht)
(itr, iti, ftr, fti, otr, oti, ctr, cti) = gates.chunk(8, 1)
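            # The joint projection yields the four LSTM gates (input, forget,
            # output, cell); each gate comes in two chunks that are glued back
            # together below into a 2 * hidden_size complex-valued activation.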
it = torch.sigmoid(torch.cat([itr, iti], dim=-1))
ft = torch.sigmoid(torch.cat([ftr, fti], dim=-1))
ot = torch.sigmoid(torch.cat([otr, oti], dim=-1))
ct = (
it * torch.tanh(torch.cat([ctr, cti], dim=-1)) * drop_mask
+ ft * ct
)
ht = ot * torch.tanh(ct)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.register_buffer(
"drop_masks",
self.drop(torch.ones(self.N_drop_masks, self.hidden_size * 2)).data,
)
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 2, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
self.drop_mask_te = self.drop_mask_te.to(w.device)
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
and test. We also update the h_int and drop masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(self.N_drop_masks, self.hidden_size * 2)
).data
class CRNN(torch.nn.Module):
""" This function implements a vanilla complex-valued RNN.
Input format is (batch, time, fea) or (batch, time, fea, channel).
In the latter shape, the two last dimensions will be merged:
(batch, time, fea * channel)
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
        Specified value is in terms of complex-valued neurons. Thus, the output
is 2*hidden_size.
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
nonlinearity : str, optional
Type of nonlinearity (tanh, relu) (default "tanh").
bias : bool, optional
If True, the additive bias b is adopted (default True).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
return_hidden : bool, optional
        If True, the function returns the last hidden layer (default False).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> rnn = CRNN(hidden_size=16, input_shape=inp_tensor.shape)
>>> out_tensor = rnn(inp_tensor)
    >>> out_tensor.shape
torch.Size([10, 16, 32])
"""
def __init__(
self,
hidden_size,
input_shape,
nonlinearity="tanh",
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
return_hidden=False,
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.hidden_size = hidden_size * 2 # z = x + iy
self.nonlinearity = nonlinearity
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.return_hidden = return_hidden
self.init_criterion = init_criterion
self.weight_init = weight_init
if len(input_shape) > 3:
self.reshape = True
# Computing the feature dimensionality
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
def _init_layers(self,):
"""
Initializes the layers of the CRNN.
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = CRNN_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
nonlinearity=self.nonlinearity,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx=None):
"""Returns the output of the vanilla CRNN.
Arguments
---------
x : torch.Tensor
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
output, hh = self._forward_rnn(x, hx=hx)
if self.return_hidden:
return output, hh
else:
return output
def _forward_rnn(self, x, hx):
"""Returns the output of the vanilla CRNN.
Arguments
---------
x : torch.Tensor
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, rnn_lay in enumerate(self.rnn):
if hx is not None:
x = rnn_lay(x, hx=hx[i])
else:
x = rnn_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
class CRNN_Layer(torch.nn.Module):
""" This function implements complex-valued recurrent layer.
Arguments
---------
input_size : int
        Feature dimensionality of the input tensors (in terms of real values).
    batch_size : int
        Batch size of the input tensors.
    hidden_size : int
        Number of output values (in terms of real values).
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
nonlinearity : str, optional
Type of nonlinearity (tanh, relu) (default "tanh").
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="tanh",
bidirectional=False,
init_criterion="glorot",
weight_init="complex",
):
super(CRNN_Layer, self).__init__()
self.hidden_size = int(hidden_size) // 2 # Express in term of complex
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.w = CLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
self.u = CLinear(
input_shape=self.hidden_size * 2, # The input size is in real
n_neurons=self.hidden_size,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 2))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
# Setting the activation function
if nonlinearity == "tanh":
self.act = torch.nn.Tanh()
else:
self.act = torch.nn.ReLU()
def forward(self, x, hx=None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the CRNN_layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
# self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Processing time steps
if hx is not None:
h = self._complexrnn_cell(w, hx)
else:
h = self._complexrnn_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _complexrnn_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
wx : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
at = w[:, k] + self.u(ht)
ht = self.act(at) * drop_mask
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.register_buffer(
"drop_masks",
self.drop(torch.ones(self.N_drop_masks, self.hidden_size * 2)).data,
)
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 2, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
self.drop_mask_te = self.drop_mask_te.to(w.device)
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
and test. We also update the h_int and drop masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(self.N_drop_masks, self.hidden_size * 2)
).data
class CLiGRU(torch.nn.Module):
""" This function implements a complex-valued Light GRU (liGRU).
    The LiGRU is a single-gate GRU model based on batch-norm + relu
    activations + recurrent dropout. For more info see:
"M. Ravanelli, P. Brakel, M. Omologo, Y. Bengio,
Light Gated Recurrent Units for Speech Recognition,
in IEEE Transactions on Emerging Topics in Computational Intelligence,
2018" (https://arxiv.org/abs/1803.10225)
To speed it up, it is compiled with the torch just-in-time compiler (jit)
right before using it.
It accepts in input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
        Specified value is in terms of complex-valued neurons. Thus, the output
is 2*hidden_size.
nonlinearity : str
Type of nonlinearity (tanh, relu).
normalization : str
Type of normalization for the ligru model (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout : float
It is the dropout factor (must be between 0 and 1).
return_hidden : bool
If True, the function returns the last hidden layer.
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> rnn = CLiGRU(input_shape=inp_tensor.shape, hidden_size=16)
>>> out_tensor = rnn(inp_tensor)
    >>> out_tensor.shape
    torch.Size([10, 16, 32])
"""
def __init__(
self,
hidden_size,
input_shape,
nonlinearity="relu",
normalization="batchnorm",
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
return_hidden=False,
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.hidden_size = hidden_size * 2 # z = x + iy
self.nonlinearity = nonlinearity
self.num_layers = num_layers
self.normalization = normalization
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.return_hidden = return_hidden
self.init_criterion = init_criterion
self.weight_init = weight_init
if len(input_shape) > 3:
self.reshape = True
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
def _init_layers(self):
"""Initializes the layers of the liGRU.
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = CLiGRU_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
nonlinearity=self.nonlinearity,
normalization=self.normalization,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx=None):
"""Returns the output of the CliGRU.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# run ligru
output, hh = self._forward_ligru(x, hx=hx)
if self.return_hidden:
return output, hh
else:
return output
def _forward_ligru(self, x, hx):
"""Returns the output of the CliGRU.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, ligru_lay in enumerate(self.rnn):
if hx is not None:
x = ligru_lay(x, hx=hx[i])
else:
x = ligru_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
class CLiGRU_Layer(torch.nn.Module):
"""
This function implements complex-valued Light-Gated Recurrent Unit layer.
Arguments
---------
input_size : int
Feature dimensionality of the input tensors.
batch_size : int
Batch size of the input tensors.
hidden_size : int
Number of output values.
num_layers : int
Number of layers to employ in the RNN architecture.
nonlinearity : str
Type of nonlinearity (tanh, relu).
normalization : str
Type of normalization (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
dropout : float
It is the dropout factor (must be between 0 and 1).
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="relu",
normalization="batchnorm",
bidirectional=False,
init_criterion="glorot",
weight_init="complex",
):
super(CLiGRU_Layer, self).__init__()
self.hidden_size = int(hidden_size) // 2
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.normalization = normalization
self.nonlinearity = nonlinearity
self.w = CLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size * 2,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
self.u = CLinear(
input_shape=self.hidden_size * 2, # The input size is in real
n_neurons=self.hidden_size * 2,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initializing batch norm
self.normalize = False
if self.normalization == "batchnorm":
self.norm = CBatchNorm(
input_size=hidden_size * 2, dim=-1, momentum=0.05
)
self.normalize = True
elif self.normalization == "layernorm":
self.norm = CLayerNorm(input_size=hidden_size * 2, dim=-1)
self.normalize = True
else:
# Normalization is disabled here. self.norm is only formally
# initialized to avoid jit issues.
self.norm = CLayerNorm(input_size=hidden_size * 2, dim=-1)
            self.normalize = False
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 2))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
# Setting the activation function
if self.nonlinearity == "tanh":
self.act = torch.nn.Tanh()
else:
self.act = torch.nn.ReLU()
def forward(self, x, hx=None):
# type: (Tensor, Optional[Tensor], Optional[Bool]) -> Tensor # noqa F821
"""Returns the output of the Complex liGRU layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Apply batch normalization
if self.normalize:
w_bn = self.norm(w.reshape(w.shape[0] * w.shape[1], w.shape[2]))
w = w_bn.reshape(w.shape[0], w.shape[1], w.shape[2])
# Processing time steps
if hx is not None:
h = self._complex_ligru_cell(w, hx)
else:
h = self._complex_ligru_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _complex_ligru_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
wx : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
gates = w[:, k] + self.u(ht)
atr, ati, ztr, zti = gates.chunk(4, 1)
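            # `at` is the candidate activation and `zt` the update gate of the
            # liGRU; the cell keeps a convex combination of the previous state
            # and the new candidate: ht = zt * ht + (1 - zt) * hcand, with
            # recurrent dropout applied to the candidate.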
at = torch.cat([atr, ati], dim=-1)
zt = torch.cat([ztr, zti], dim=-1)
zt = torch.sigmoid(zt)
hcand = self.act(at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.register_buffer(
"drop_masks",
self.drop(torch.ones(self.N_drop_masks, self.hidden_size * 2)).data,
)
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 2, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
self.drop_mask_te = self.drop_mask_te.to(w.device)
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
and test. We also update the h_int and drop masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(self.N_drop_masks, self.hidden_size)
).data
| 38,455 | 31.153846 | 97 | py |
speechbrain | speechbrain-main/speechbrain/nnet/complex_networks/c_linear.py | """Library implementing complex-valued linear transformation.
Authors
* Titouan Parcollet 2020
"""
import torch
import logging
from speechbrain.nnet.complex_networks.c_ops import (
affect_init,
complex_init,
unitary_init,
complex_linear_op,
check_complex_input,
)
logger = logging.getLogger(__name__)
class CLinear(torch.nn.Module):
"""This function implements a fully connected complex-valued
linear layer: y = Wx + b. y, W, x and b are thus complex
numbers. A complex number is written as: r + xi. A tensor of
complex numbers x = [batch, 32] can be understood as
[batch, 0:15] = R and [batch, 16:31] = Xi. Thus the features
dimension is cut in half (must be divisible by 2).
Arguments
---------
n_neurons : int
It is the number of output neurons (i.e, the dimensionality of the
output). Please note that these are complex-valued neurons. If 256
neurons are specified, the output dimension will be 512.
input_shape : tuple
Expected size of the input.
bias : bool
if True, the additive bias b is adopted.
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights (default "glorot").
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default "complex"). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inputs = torch.rand(10, 50, 40)
>>> lin = CLinear(n_neurons=100, input_shape=inputs.shape)
>>> output = lin(inputs)
>>> output.shape
torch.Size([10, 50, 200])
"""
def __init__(
self,
n_neurons,
input_shape,
bias=True,
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.n_neurons = n_neurons
self.bias = bias
self.init_criterion = init_criterion
self.weight_init = weight_init
        # When initializing with SpeechBrain, the input_shape may be an integer.
        # We need to transform it into a list so it works with all the complex ops.
if isinstance(input_shape, int):
input_shape = [1, input_shape]
# Check the complex_valued form of the input
check_complex_input(input_shape)
# Computing the complex dimensionality of the input
self.in_features = input_shape[-1] // 2
self.out_features = self.n_neurons
# Two weight matrices are created for the real and imaginary parts of
# the weights. This will also allow an easier complex product.
self.real_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
self.imag_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(2 * self.out_features))
else:
self.b = torch.Tensor(2 * self.out_features).requires_grad_(False)
# Managing the weight initialization and bias
self.winit = {"complex": complex_init, "unitary": unitary_init}[
self.weight_init
]
affect_init(
self.real_weight, self.imag_weight, self.winit, init_criterion
)
def forward(self, x):
"""Returns the linear transformation of input tensor.
Arguments
---------
x : torch.Tensor
Input to transform linearly.
"""
wx = complex_linear_op(x, self.real_weight, self.imag_weight, self.b)
return wx
| 4,020 | 32.508333 | 97 | py |
speechbrain | speechbrain-main/speechbrain/nnet/transducer/transducer_joint.py | """Library implementing transducer_joint.
Author
Abdelwahab HEBA 2020
"""
import torch
import logging
import torch.nn as nn
logger = logging.getLogger(__name__)
class Transducer_joint(nn.Module):
"""Computes joint tensor between Transcription network (TN) & Prediction network (PN)
Arguments
---------
joint_network : torch.class (neural network modules)
if joint == "concat", we call this network after the concatenation of TN and PN
if None, we don't use this network.
joint : joint the two tensors by ("sum",or "concat") option.
nonlinearity : torch class
Activation function used after the joint between TN and PN
Type of nonlinearity (tanh, relu).
Example
-------
>>> from speechbrain.nnet.transducer.transducer_joint import Transducer_joint
>>> from speechbrain.nnet.linear import Linear
>>> input_TN = torch.rand(8, 200, 1, 40)
>>> input_PN = torch.rand(8, 1, 12, 40)
>>> joint_network = Linear(input_size=80, n_neurons=80)
>>> TJoint = Transducer_joint(joint_network, joint="concat")
>>> output = TJoint(input_TN, input_PN)
>>> output.shape
torch.Size([8, 200, 12, 80])
"""
def __init__(
self, joint_network=None, joint="sum", nonlinearity=torch.nn.LeakyReLU
):
super().__init__()
self.joint_network = joint_network
self.joint = joint
self.nonlinearity = nonlinearity()
def init_params(self, first_input):
"""
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
self.joint_network(first_input)
def forward(self, input_TN, input_PN):
"""Returns the fusion of inputs tensors.
Arguments
---------
input_TN : torch.Tensor
Input from Transcription Network.
input_PN : torch.Tensor
Input from Prediction Network.
"""
if len(input_TN.shape) != len(input_PN.shape):
raise ValueError("Arg 1 and 2 must be have same size")
if not (len(input_TN.shape) != 4 or len(input_TN.shape) != 1):
raise ValueError("Tensors 1 and 2 must have dim=1 or dim=4")
if self.joint == "sum":
joint = input_TN + input_PN
if self.joint == "concat":
# For training
if len(input_TN.shape) == 4:
dim = len(input_TN.shape) - 1
xs = input_TN
ymat = input_PN
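                # Broadcast TN (batch, T, 1, feat) and PN (batch, 1, U, feat)
                # to a shared (batch, T, U, feat) grid before concatenating
                # along the feature axis.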
sz = [
max(i, j) for i, j in zip(xs.size()[:-1], ymat.size()[:-1])
]
xs = xs.expand(torch.Size(sz + [xs.shape[-1]]))
ymat = ymat.expand(torch.Size(sz + [ymat.shape[-1]]))
joint = torch.cat((xs, ymat), dim=dim)
# For evaluation
elif len(input_TN.shape) == 1:
joint = torch.cat((input_TN, input_PN), dim=0)
if self.joint_network is not None:
joint = self.joint_network(joint)
return self.nonlinearity(joint)
| 3,106 | 31.364583 | 89 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quaternion_networks/q_RNN.py | """Library implementing quaternion-valued recurrent neural networks.
Authors
* Titouan Parcollet 2020
"""
import torch
import logging
from speechbrain.nnet.quaternion_networks.q_linear import QLinear
from speechbrain.nnet.quaternion_networks.q_normalization import QBatchNorm
from torch import Tensor
from typing import Optional
logger = logging.getLogger(__name__)
class QLSTM(torch.nn.Module):
""" This function implements a quaternion-valued LSTM as first introduced
in : "Quaternion Recurrent Neural Networks", Parcollet T. et al.
Input format is (batch, time, fea) or (batch, time, fea, channel).
In the latter shape, the two last dimensions will be merged:
(batch, time, fea * channel)
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
Specified value is in terms of quaternion-valued neurons. Thus, the output
is 4*hidden_size.
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
bias : bool, optional
If True, the additive bias b is adopted (default True).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
custom backpropagation will be used, reducing by a factor 3 to 4 the
memory consumption. It is also 2x slower (default True).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 40])
>>> rnn = QLSTM(hidden_size=16, input_shape=inp_tensor.shape)
    >>> out_tensor, _ = rnn(inp_tensor)
    >>> out_tensor.shape
torch.Size([10, 16, 64])
"""
def __init__(
self,
hidden_size,
input_shape,
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd=True,
):
super().__init__()
self.hidden_size = hidden_size * 4
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
if len(input_shape) > 3:
self.reshape = True
# Computing the feature dimensionality
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
def _init_layers(self,):
"""Initializes the layers of the quaternionLSTM.
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = QLSTM_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
autograd=self.autograd,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx: Optional[Tensor] = None):
"""Returns the output of the vanilla QuaternionRNN.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
output, hh = self._forward_rnn(x, hx=hx)
return output, hh
def _forward_rnn(self, x, hx: Optional[Tensor]):
"""Returns the output of the vanilla QuaternionRNN.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, rnn_lay in enumerate(self.rnn):
if hx is not None:
x = rnn_lay(x, hx=hx[i])
else:
x = rnn_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
class QLSTM_Layer(torch.nn.Module):
""" This function implements quaternion-valued LSTM layer.
Arguments
---------
input_size : int
        Feature dimensionality of the input tensors (in terms of real values).
    batch_size : int
        Batch size of the input tensors.
    hidden_size : int
        Number of output values (in terms of real values).
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
custom backpropagation will be used, reducing by a factor 3 to 4 the
memory consumption. It is also 2x slower (default True).
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd="true",
):
super(QLSTM_Layer, self).__init__()
self.hidden_size = int(hidden_size) // 4 # Express in term of quat
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
self.w = QLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size * 4, # Forget, Input, Output, Cell
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
self.u = QLinear(
input_shape=self.hidden_size * 4, # The input size is in real
n_neurons=self.hidden_size * 4,
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 4))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
def forward(self, x, hx: Optional[Tensor] = None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the QuaternionRNN_layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Processing time steps
if hx is not None:
h = self._quaternionlstm_cell(w, hx)
else:
h = self._quaternionlstm_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _quaternionlstm_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
wx : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Initialise the cell state
ct = self.h_init
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
gates = w[:, k] + self.u(ht)
(
itr,
iti,
itj,
itk,
ftr,
fti,
ftj,
ftk,
otr,
oti,
otj,
otk,
ctr,
cti,
ctj,
ctk,
) = gates.chunk(16, 1)
it = torch.sigmoid(torch.cat([itr, iti, itj, itk], dim=-1))
ft = torch.sigmoid(torch.cat([ftr, fti, ftj, ftk], dim=-1))
ot = torch.sigmoid(torch.cat([otr, oti, otj, otk], dim=-1))
ct = (
it
* torch.tanh(torch.cat([ctr, cti, ctj, ctk], dim=-1))
* drop_mask
+ ft * ct
)
ht = ot * torch.tanh(ct)
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(self.N_drop_masks, self.hidden_size * 4,)
).data
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks.
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 4, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
and test. We also update the h_int and drop masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 4, device=x.device
)
).data
class QRNN(torch.nn.Module):
""" This function implements a vanilla quaternion-valued RNN.
Input format is (batch, time, fea) or (batch, time, fea, channel).
In the latter shape, the two last dimensions will be merged:
(batch, time, fea * channel)
Arguments
---------
hidden_size : int
Number of output neurons (i.e, the dimensionality of the output).
        Specified value is in terms of quaternion-valued neurons. Thus, the output
is 4*hidden_size.
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
nonlinearity : str, optional
Type of nonlinearity (tanh, relu) (default "tanh").
bias : bool, optional
If True, the additive bias b is adopted (default True).
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
        custom backpropagation will be used, reducing the memory consumption
        by a factor of 3 to 4, at the cost of being roughly 2x slower (default True).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 40])
>>> rnn = QRNN(hidden_size=16, input_shape=inp_tensor.shape)
    >>> out_tensor, hn = rnn(inp_tensor)
    >>> out_tensor.shape
    torch.Size([10, 16, 64])
"""
def __init__(
self,
hidden_size,
input_shape,
nonlinearity="tanh",
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd=True,
):
super().__init__()
        self.hidden_size = hidden_size * 4  # q = r + xi + yj + zk
self.nonlinearity = nonlinearity
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
if len(input_shape) > 3:
self.reshape = True
# Computing the feature dimensionality
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
    def _init_layers(self):
        """Initializes the layers of the quaternion RNN."""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = QRNN_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
nonlinearity=self.nonlinearity,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
autograd=self.autograd,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx: Optional[Tensor] = None):
"""Returns the output of the vanilla QuaternionRNN.
Arguments
---------
x : torch.Tensor
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
output, hh = self._forward_rnn(x, hx=hx)
return output, hh
def _forward_rnn(self, x, hx: Optional[Tensor]):
"""Returns the output of the vanilla QuaternionRNN.
Arguments
---------
x : torch.Tensor
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, rnn_lay in enumerate(self.rnn):
if hx is not None:
x = rnn_lay(x, hx=hx[i])
else:
x = rnn_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
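# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# The docstring example above covers the unidirectional case. The helper
# below (a hypothetical name used only for illustration) sketches the
# bidirectional case: with hidden_size expressed in quaternion neurons, the
# output feature dimension should be 4 * hidden_size for a unidirectional
# model and 8 * hidden_size when bidirectional=True, since forward and
# backward states are concatenated along the feature axis.
def _demo_bidirectional_qrnn():
    inp = torch.rand([10, 16, 40])
    rnn = QRNN(hidden_size=16, input_shape=inp.shape, bidirectional=True)
    out, hn = rnn(inp)
    # out.shape is expected to be torch.Size([10, 16, 128]);
    # hn stacks the last hidden state of every layer.
    return out.shape, hn.shape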
class QRNN_Layer(torch.nn.Module):
"""This function implements quaternion-valued recurrent layer.
Arguments
---------
input_size : int
        Feature dimensionality of the input tensors (in terms of real values).
batch_size : int
Batch size of the input tensors.
hidden_size : int
        Number of output values (in terms of real values).
num_layers : int, optional
Number of layers to employ in the RNN architecture (default 1).
nonlinearity : str, optional
Type of nonlinearity (tanh, relu) (default "tanh").
dropout : float, optional
It is the dropout factor (must be between 0 and 1) (default 0.0).
bidirectional : bool, optional
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used (default False).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
        custom backpropagation will be used, reducing the memory consumption
        by a factor of 3 to 4, at the cost of being roughly 2x slower (default True).
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="tanh",
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd="true",
):
super(QRNN_Layer, self).__init__()
        self.hidden_size = int(hidden_size) // 4  # Expressed in terms of quaternions
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
self.w = QLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size,
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
self.u = QLinear(
input_shape=self.hidden_size * 4, # The input size is in real
n_neurons=self.hidden_size,
bias=True,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 4))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
# Setting the activation function
if nonlinearity == "tanh":
self.act = torch.nn.Tanh()
else:
self.act = torch.nn.ReLU()
def forward(self, x, hx: Optional[Tensor] = None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the QuaternionRNN_layer.
Arguments
---------
x : torch.Tensor
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Processing time steps
if hx is not None:
h = self._quaternionrnn_cell(w, hx)
else:
h = self._quaternionrnn_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _quaternionrnn_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
        w : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
at = w[:, k] + self.u(ht)
ht = self.act(at) * drop_mask
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(self.N_drop_masks, self.hidden_size * 4,)
).data
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks.
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 4, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
        and test. We also update the h_init and dropout masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(
                        self.N_drop_masks, self.hidden_size * 4, device=x.device
)
).data
class QLiGRU(torch.nn.Module):
""" This function implements a quaternion-valued Light GRU (liGRU).
    The LiGRU is a single-gate GRU model based on batch-norm + relu
activations + recurrent dropout. For more info see:
"M. Ravanelli, P. Brakel, M. Omologo, Y. Bengio,
Light Gated Recurrent Units for Speech Recognition,
in IEEE Transactions on Emerging Topics in Computational Intelligence,
2018" (https://arxiv.org/abs/1803.10225)
To speed it up, it is compiled with the torch just-in-time compiler (jit)
right before using it.
It accepts in input tensors formatted as (batch, time, fea).
In the case of 4d inputs like (batch, time, fea, channel) the tensor is
flattened as (batch, time, fea*channel).
Arguments
---------
hidden_size : int
        Number of output neurons (i.e., the dimensionality of the output).
        The specified value is in terms of quaternion-valued neurons. Thus, the output
        is 4*hidden_size.
    nonlinearity : str
        Type of nonlinearity (tanh, relu, leaky_relu).
normalization : str
Type of normalization for the ligru model (batchnorm, layernorm).
Every string different from batchnorm and layernorm will result
in no normalization.
num_layers : int
Number of layers to employ in the RNN architecture.
bias : bool
If True, the additive bias b is adopted.
dropout: float
It is the dropout factor (must be between 0 and 1).
bidirectional : bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
init_criterion : str, optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion-valued
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Deep quaternion Networks", Trabelsi C. et al.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
        custom backpropagation will be used, reducing the memory consumption
        by a factor of 3 to 4, at the cost of being roughly 2x slower (default True).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 40])
>>> rnn = QLiGRU(input_shape=inp_tensor.shape, hidden_size=16)
    >>> out_tensor, hn = rnn(inp_tensor)
    >>> out_tensor.shape
    torch.Size([10, 16, 64])
"""
def __init__(
self,
hidden_size,
input_shape,
nonlinearity="leaky_relu",
num_layers=1,
bias=True,
dropout=0.0,
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd=True,
):
super().__init__()
        self.hidden_size = hidden_size * 4  # q = r + xi + yj + zk
self.nonlinearity = nonlinearity
self.num_layers = num_layers
self.bias = bias
self.dropout = dropout
self.bidirectional = bidirectional
self.reshape = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
if len(input_shape) > 3:
self.reshape = True
self.fea_dim = torch.prod(torch.tensor(input_shape[2:]))
self.batch_size = input_shape[0]
self.rnn = self._init_layers()
def _init_layers(self):
"""
Initializes the layers of the liGRU.
Arguments
---------
first_input : tensor
A first input used for initializing the parameters.
"""
rnn = torch.nn.ModuleList([])
current_dim = self.fea_dim
for i in range(self.num_layers):
rnn_lay = QLiGRU_Layer(
current_dim,
self.hidden_size,
self.num_layers,
self.batch_size,
dropout=self.dropout,
nonlinearity=self.nonlinearity,
bidirectional=self.bidirectional,
init_criterion=self.init_criterion,
weight_init=self.weight_init,
autograd=self.autograd,
)
rnn.append(rnn_lay)
if self.bidirectional:
current_dim = self.hidden_size * 2
else:
current_dim = self.hidden_size
return rnn
def forward(self, x, hx: Optional[Tensor] = None):
"""Returns the output of the QuaternionliGRU.
Arguments
---------
x : torch.Tensor
"""
# Reshaping input tensors for 4d inputs
if self.reshape:
if x.ndim == 4:
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
# run ligru
output, hh = self._forward_ligru(x, hx=hx)
return output, hh
def _forward_ligru(self, x, hx: Optional[Tensor]):
"""Returns the output of the quaternionliGRU.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
h = []
if hx is not None:
if self.bidirectional:
hx = hx.reshape(
self.num_layers, self.batch_size * 2, self.hidden_size
)
# Processing the different layers
for i, ligru_lay in enumerate(self.rnn):
if hx is not None:
x = ligru_lay(x, hx=hx[i])
else:
x = ligru_lay(x, hx=None)
h.append(x[:, -1, :])
h = torch.stack(h, dim=1)
if self.bidirectional:
h = h.reshape(h.shape[1] * 2, h.shape[0], self.hidden_size)
else:
h = h.transpose(0, 1)
return x, h
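# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# A minimal usage sketch (hypothetical helper name) showing how an explicit
# initial hidden state can be passed to the quaternion liGRU defined above:
# one (batch, 4 * hidden_size) tensor per layer, stacked on the first axis.
def _demo_qligru_with_state():
    inp = torch.rand([10, 16, 40])
    net = QLiGRU(hidden_size=16, input_shape=inp.shape)
    # One layer, batch of 10, 16 quaternion neurons = 64 real values.
    hx = torch.zeros(1, 10, 64)
    out, hn = net(inp, hx=hx)
    # out.shape is expected to be torch.Size([10, 16, 64]).
    return out.shape, hn.shape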
class QLiGRU_Layer(torch.nn.Module):
""" This function implements quaternion-valued Light-Gated Recurrent Units
(ligru) layer.
Arguments
---------
input_size: int
Feature dimensionality of the input tensors.
batch_size: int
Batch size of the input tensors.
hidden_size: int
Number of output values.
num_layers: int
Number of layers to employ in the RNN architecture.
nonlinearity: str
        Type of nonlinearity (tanh, relu, leaky_relu).
dropout: float
It is the dropout factor (must be between 0 and 1).
bidirectional: bool
If True, a bidirectional model that scans the sequence both
right-to-left and left-to-right is used.
init_criterion: str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init: str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Deep quaternion Networks", Trabelsi C. et al.
autograd: bool, optional
When True, the default PyTorch autograd will be used. When False, a
        custom backpropagation will be used, reducing the memory consumption
        by a factor of 3 to 4, at the cost of being roughly 2x slower (default True).
"""
def __init__(
self,
input_size,
hidden_size,
num_layers,
batch_size,
dropout=0.0,
nonlinearity="leaky_relu",
normalization="batchnorm",
bidirectional=False,
init_criterion="glorot",
weight_init="quaternion",
autograd=True,
):
super(QLiGRU_Layer, self).__init__()
self.hidden_size = int(hidden_size) // 4
self.input_size = int(input_size)
self.batch_size = batch_size
self.bidirectional = bidirectional
self.dropout = dropout
self.init_criterion = init_criterion
self.weight_init = weight_init
self.normalization = normalization
self.nonlinearity = nonlinearity
self.autograd = autograd
self.w = QLinear(
input_shape=self.input_size,
n_neurons=self.hidden_size * 2,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
self.u = QLinear(
input_shape=self.hidden_size * 4, # The input size is in real
n_neurons=self.hidden_size * 2,
bias=False,
weight_init=self.weight_init,
init_criterion=self.init_criterion,
autograd=self.autograd,
)
if self.bidirectional:
self.batch_size = self.batch_size * 2
# Initializing batch norm
self.normalize = False
if self.normalization == "batchnorm":
self.norm = QBatchNorm(input_size=hidden_size * 2, dim=-1)
self.normalize = True
else:
# Normalization is disabled here. self.norm is only formally
# initialized to avoid jit issues.
self.norm = QBatchNorm(input_size=hidden_size * 2, dim=-1)
self.normalize = False
# Initial state
self.register_buffer("h_init", torch.zeros(1, self.hidden_size * 4))
# Preloading dropout masks (gives some speed improvement)
self._init_drop(self.batch_size)
# Initializing dropout
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
# Setting the activation function
if self.nonlinearity == "tanh":
self.act = torch.nn.Tanh()
elif self.nonlinearity == "leaky_relu":
self.act = torch.nn.LeakyReLU()
else:
self.act = torch.nn.ReLU()
def forward(self, x, hx: Optional[Tensor] = None):
# type: (Tensor, Optional[Tensor]) -> Tensor # noqa F821
"""Returns the output of the quaternion liGRU layer.
Arguments
---------
x : torch.Tensor
Input tensor.
"""
if self.bidirectional:
x_flip = x.flip(1)
x = torch.cat([x, x_flip], dim=0)
# Change batch size if needed
self._change_batch_size(x)
# Feed-forward affine transformations (all steps in parallel)
w = self.w(x)
# Apply batch normalization
if self.normalize:
w_bn = self.norm(w.reshape(w.shape[0] * w.shape[1], w.shape[2]))
w = w_bn.reshape(w.shape[0], w.shape[1], w.shape[2])
# Processing time steps
if hx is not None:
h = self._quaternion_ligru_cell(w, hx)
else:
h = self._quaternion_ligru_cell(w, self.h_init)
if self.bidirectional:
h_f, h_b = h.chunk(2, dim=0)
h_b = h_b.flip(1)
h = torch.cat([h_f, h_b], dim=2)
return h
def _quaternion_ligru_cell(self, w, ht):
"""Returns the hidden states for each time step.
Arguments
---------
        w : torch.Tensor
Linearly transformed input.
"""
hiddens = []
# Sampling dropout mask
drop_mask = self._sample_drop_mask(w)
# Loop over time axis
for k in range(w.shape[1]):
gates = w[:, k] + self.u(ht)
atr, ati, atj, atk, ztr, zti, ztj, ztk = gates.chunk(8, 1)
at = torch.cat([atr, ati, atj, atk], dim=-1)
zt = torch.cat([ztr, zti, ztj, ztk], dim=-1)
zt = torch.sigmoid(zt)
hcand = self.act(at) * drop_mask
ht = zt * ht + (1 - zt) * hcand
hiddens.append(ht)
# Stacking hidden states
h = torch.stack(hiddens, dim=1)
return h
def _init_drop(self, batch_size):
"""Initializes the recurrent dropout operation. To speed it up,
the dropout masks are sampled in advance.
"""
self.drop = torch.nn.Dropout(p=self.dropout, inplace=False)
self.drop_mask_te = torch.tensor([1.0]).float()
self.N_drop_masks = 16000
self.drop_mask_cnt = 0
self.register_buffer(
"drop_masks",
self.drop(torch.ones(self.N_drop_masks, self.hidden_size * 4)).data,
)
def _sample_drop_mask(self, w):
"""Selects one of the pre-defined dropout masks
"""
if self.training:
# Sample new masks when needed
if self.drop_mask_cnt + self.batch_size > self.N_drop_masks:
self.drop_mask_cnt = 0
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 4, device=w.device
)
).data
# Sampling the mask
drop_mask = self.drop_masks[
self.drop_mask_cnt : self.drop_mask_cnt + self.batch_size
]
self.drop_mask_cnt = self.drop_mask_cnt + self.batch_size
else:
self.drop_mask_te = self.drop_mask_te.to(w.device)
drop_mask = self.drop_mask_te
return drop_mask
def _change_batch_size(self, x):
"""This function changes the batch size when it is different from
the one detected in the initialization method. This might happen in
the case of multi-gpu or when we have different batch sizes in train
        and test. We also update the h_init and dropout masks.
"""
if self.batch_size != x.shape[0]:
self.batch_size = x.shape[0]
if self.training:
self.drop_masks = self.drop(
torch.ones(
self.N_drop_masks, self.hidden_size * 4, device=x.device
)
).data
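# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# All recurrent layers in this module pre-sample a large pool of dropout
# masks once (``_init_drop``) and then slice consecutive rows out of that
# pool at every forward call (``_sample_drop_mask``). The same mask is then
# reused for every time step of a sequence (variational-style recurrent
# dropout), which avoids sampling a fresh Bernoulli mask per step. The toy
# function below reproduces the idea in isolation; its name and arguments
# are hypothetical.
def _demo_presampled_dropout(batch_size=4, feats=8, p=0.2, pool_size=32):
    drop = torch.nn.Dropout(p=p)
    # Pre-sampled pool of masks (already rescaled by 1 / (1 - p)).
    masks = drop(torch.ones(pool_size, feats)).data
    cnt = 0
    picked = []
    for _ in range(3):  # three successive forward calls
        if cnt + batch_size > pool_size:  # refresh the pool when exhausted
            masks = drop(torch.ones(pool_size, feats)).data
            cnt = 0
        picked.append(masks[cnt : cnt + batch_size])
        cnt += batch_size
    return picked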
| 40,503 | 32.06449 | 94 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quaternion_networks/q_CNN.py | """Library implementing quaternion-valued convolutional neural networks.
Authors
* Titouan Parcollet 2020
"""
import torch
import torch.nn as nn
import logging
import torch.nn.functional as F
from speechbrain.nnet.CNN import get_padding_elem
from speechbrain.nnet.quaternion_networks.q_ops import (
unitary_init,
quaternion_init,
affect_conv_init,
quaternion_conv_op,
quaternion_conv_rotation_op,
)
from typing import Tuple
logger = logging.getLogger(__name__)
class QConv1d(torch.nn.Module):
"""This function implements quaternion-valued 1d convolution.
Arguments
---------
input_shape : tuple
The shape of the input.
out_channels : int
Number of output channels. Please note
that these are quaternion-valued neurons. If 256
channels are specified, the output dimension
will be 1024.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
"causal" results in causal (dilated) convolutions (default "same").
padding_mode : str, optional
This flag specifies the type of padding. See torch.nn documentation
for more information (default "reflect").
groups : int, optional
Default: 1
This option specifies the convolutional groups. See torch.nn
documentation for more information (default 1).
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
spinor : bool, optional
When True, the layer will be turned into a spinor layer. More precisely
W*x will be turned into W*x*W-1. The input x will be rotated by W such
as in a spinor neural network. However, x MUST be a quaternion with
the real part equal to zero. (0 + xi + yj + zk). Indeed, the rotation
operation only acts on the vector part. Note that W will always be
normalized before the rotation to ensure the quaternion algebra (default False).
More details in: "Quaternion neural networks", Parcollet T.
vector_scale : bool, optional
The vector_scale is only used when spinor = True. In the context of a
spinor neural network, multiple rotations of the input vector x are
performed and summed. Hence, the norm of the output vector always
        increases with the number of layers, making the neural network unstable
        with deep configurations. The vector_scale parameters are learnable
        parameters that act like gates by multiplying the output vector with
a small trainable parameter (default False).
Example
-------
>>> inp_tensor = torch.rand([10, 16, 40])
>>> cnn_1d = QConv1d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=3
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 48])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="quaternion",
spinor=False,
vector_scale=False,
):
super().__init__()
self.input_shape = input_shape
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.unsqueeze = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.spinor = spinor
self.vector_scale = vector_scale
self.in_channels = self._check_input(input_shape) // 4
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.r_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.i_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.j_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.k_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
# Spinor specific parameters
if self.spinor:
self.zero_kernel = torch.nn.Parameter(
torch.zeros(self.r_weight.shape), requires_grad=False
)
else:
self.zero_kernel = torch.Tensor(self.r_weight.shape).requires_grad_(
False
)
if self.spinor and self.vector_scale:
self.scale_param = torch.nn.Parameter(
torch.Tensor(self.r_weight.shape)
)
torch.nn.init.xavier_uniform_(self.scale_param.data)
else:
self.scale_param = torch.Tensor(self.r_weight.shape).requires_grad_(
False
)
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(4 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = torch.Tensor(4 * self.out_channels).requires_grad_(False)
self.winit = {"quaternion": quaternion_init, "unitary": unitary_init}[
self.weight_init
]
# Initialise the weights
affect_conv_init(
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
            Input to convolve. 3d tensors are expected.
"""
# (batch, channel, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got "
+ self.padding
)
if self.spinor:
out = quaternion_conv_rotation_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
scale=self.scale_param,
zero_kernel=self.zero_kernel,
stride=self.stride,
dilation=self.dilation,
padding=0, # already managed
groups=self.groups,
conv1d=True,
)
else:
out = quaternion_conv_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
stride=self.stride,
dilation=self.dilation,
padding=0, # already managed
groups=self.groups,
conv1d=True,
)
out = out.transpose(1, -1)
return out
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = self.kernel_size
w_shape = (self.out_channels, self.in_channels) + tuple((ks,))
return ks, w_shape
def _manage_padding(
self, x, kernel_size: int, dilation: int, stride: int,
):
"""This function performs zero-padding on the time axis
such that their lengths is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride: int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding = get_padding_elem(L_in, stride, kernel_size, dilation)
# Applying padding
x = F.pad(x, padding, mode=self.padding_mode)
return x
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 3:
in_channels = input_shape[2]
else:
raise ValueError(
"QuaternionConv1d expects 3d inputs. Got " + str(input_shape)
)
# Kernel size must be odd
if self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got "
+ str(self.kernel_size)
)
# Check quaternion format
if in_channels % 4 != 0:
raise ValueError(
"Quaternion Tensors must have dimensions divisible by 4."
" input.size()[3] = " + str(in_channels)
)
return in_channels
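# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# The docstring example above uses the default "same" padding. The helper
# below (hypothetical name) sketches the "causal" mode: the input is padded
# on the left only, so the time dimension is preserved while each output
# frame depends only on current and past frames.
def _demo_causal_qconv1d():
    inp = torch.rand([10, 16, 40])  # (batch, time, channels), channels % 4 == 0
    cnn = QConv1d(
        input_shape=inp.shape, out_channels=12, kernel_size=5, padding="causal"
    )
    out = cnn(inp)
    # out.shape is expected to be torch.Size([10, 16, 48]).
    return out.shape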
class QConv2d(torch.nn.Module):
"""This function implements quaternion-valued 1d convolution.
Arguments
---------
input_shape : tuple
The shape of the input.
out_channels : int
Number of output channels. Please note
that these are quaternion-valued neurons. If 256
channels are specified, the output dimension
will be 1024.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape (default "same").
padding_mode : str, optional
This flag specifies the type of padding. See torch.nn documentation
for more information. (default "reflect")
groups : int, optional
This option specifies the convolutional groups. See torch.nn
documentation for more information. (default 1).
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate random quaternion
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion Recurrent Neural Networks",
Parcollet T. et al.
spinor : bool, optional
When True, the layer will be turned into a spinor layer. More precisely
W*x will be turned into W*x*W-1. The input x will be rotated by W such
as in a spinor neural network. However, x MUST be a quaternion with
the real part equal to zero. (0 + xi + yj + zk). Indeed, the rotation
operation only acts on the vector part. Note that W will always be
normalized before the rotation to ensure the quaternion algebra (default False).
More details in: "Quaternion neural networks", Parcollet T.
vector_scale : bool, optional
The vector_scale is only used when spinor = True. In the context of a
spinor neural network, multiple rotations of the input vector x are
performed and summed. Hence, the norm of the output vector always
        increases with the number of layers, making the neural network unstable
        with deep configurations. The vector_scale parameters are learnable
        parameters that act like gates by multiplying the output vector with
a small trainable parameter (default False).
Example
-------
>>> inp_tensor = torch.rand([10, 4, 16, 40])
>>> cnn_1d = QConv2d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=3
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 4, 16, 48])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="quaternion",
spinor=False,
vector_scale=False,
):
super().__init__()
self.input_shape = input_shape
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.init_criterion = init_criterion
self.weight_init = weight_init
self.spinor = spinor
self.vector_scale = vector_scale
# handle the case if some parameters are int
if isinstance(kernel_size, int):
self.kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
self.stride = (stride, stride)
if isinstance(dilation, int):
self.dilation = (dilation, dilation)
self.in_channels = self._check_input(input_shape) // 4
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.r_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.i_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.j_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.k_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
# Spinor specific parameters
if self.spinor:
self.zero_kernel = torch.nn.Parameter(
torch.zeros(self.r_weight.shape), requires_grad=False
)
else:
self.zero_kernel = torch.Tensor(self.r_weight.shape).requires_grad_(
False
)
if self.spinor and self.vector_scale:
self.scale_param = torch.nn.Parameter(
torch.Tensor(self.r_weight.shape)
)
torch.nn.init.xavier_uniform_(self.scale_param.data)
else:
self.scale_param = torch.Tensor(self.r_weight.shape).requires_grad_(
False
)
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(4 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = torch.Tensor(4 * self.out_channels).requires_grad_(False)
self.winit = {"quaternion": quaternion_init, "unitary": unitary_init}[
self.weight_init
]
# Initialise the weights
affect_conv_init(
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
        x : torch.Tensor (batch, dim1, dim2, channel)
            Input to convolve. 4d tensors are expected.
"""
# (batch, channel, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got "
+ self.padding
)
if self.spinor:
out = quaternion_conv_rotation_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
scale=self.scale_param,
zero_kernel=self.zero_kernel,
stride=self.stride[0],
dilation=self.dilation[0],
padding=0, # already managed
groups=self.groups,
                conv1d=False,
)
else:
out = quaternion_conv_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
stride=self.stride[0],
dilation=self.dilation[0],
padding=0, # already managed
groups=self.groups,
conv1d=False,
)
out = out.transpose(1, -1)
return out
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 4:
in_channels = input_shape[-1]
else:
raise ValueError(
"QuaternionConv1d expects 4d inputs. Got " + str(input_shape)
)
        # Kernel size must be odd.
if self.kernel_size[0] % 2 == 0 or self.kernel_size[1] % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got "
+ str(self.kernel_size)
)
# Check quaternion format
if in_channels % 4 != 0:
raise ValueError(
"Quaternion Tensors must have dimensions divisible by 4."
" input.size()[" + str(-1) + "] = " + str(in_channels)
)
return in_channels
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = (self.kernel_size[0], self.kernel_size[1])
w_shape = (self.out_channels, self.in_channels) + (*ks,)
return ks, w_shape
def _manage_padding(
self,
x,
kernel_size: Tuple[int, int],
dilation: Tuple[int, int],
stride: Tuple[int, int],
):
"""This function performs zero-padding on the time and frequency axises
such that their lengths is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride: int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding_time = get_padding_elem(
L_in, stride[-1], kernel_size[-1], dilation[-1]
)
padding_freq = get_padding_elem(
L_in, stride[-2], kernel_size[-2], dilation[-2]
)
padding = padding_time + padding_freq
# Applying padding
x = nn.functional.pad(x, padding, mode=self.padding_mode)
return x
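# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# With padding="valid" no padding is applied, so both spatial dimensions
# shrink by (kernel_size - 1) while the channel dimension becomes
# 4 * out_channels. The helper name below is hypothetical.
def _demo_valid_qconv2d():
    inp = torch.rand([10, 4, 16, 40])  # (batch, dim1, dim2, channels)
    cnn = QConv2d(
        input_shape=inp.shape, out_channels=12, kernel_size=3, padding="valid"
    )
    out = cnn(inp)
    # out.shape is expected to be torch.Size([10, 2, 14, 48]).
    return out.shape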
| 20,523 | 32.980132 | 94 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quaternion_networks/q_ops.py | """This library implements different operations needed by quaternion-
valued architectures.
This work is inspired by:
"Quaternion neural networks" - Parcollet T.
"Quaternion recurrent neural networks" - Parcollet T. et al.
"Quaternion convolutional neural networks for end-to-end automatic speech
recognition" - Parcollet T. et al.
"Deep quaternion networks" - Gaudet Chase J. et al.
Authors
* Titouan Parcollet 2020
"""
import torch
import math
import numpy as np
import torch.nn.functional as F
from scipy.stats import chi
from torch.autograd import Variable
class QuaternionLinearCustomBackward(torch.autograd.Function):
"""This class redefine the backpropagation of a quaternion linear layer
(not a spinor layer). By doing so, we can save up to 4x memory, but it
is also 2x slower than 'quaternion_linear_op'. It should be used
within speechbrain.nnet.quaternion_networks.linear.QuaternionLinear.
"""
@staticmethod
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias):
"""
Applies a quaternion linear transformation to the incoming data:
It is important to notice that the forward phase of a QNN is defined
as W * Inputs (with * equal to the Hamilton product). The constructed
cat_kernels_4_quaternion is a modified version of the quaternion
representation so when we do torch.mm(Input,W) it's equivalent
to W * Inputs.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed. Shape: [batch*time, X].
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
"""
ctx.save_for_backward(
input, r_weight, i_weight, j_weight, k_weight, bias
)
cat_kernels_4_r = torch.cat(
[r_weight, -i_weight, -j_weight, -k_weight], dim=0
)
cat_kernels_4_i = torch.cat(
[i_weight, r_weight, -k_weight, j_weight], dim=0
)
cat_kernels_4_j = torch.cat(
[j_weight, k_weight, r_weight, -i_weight], dim=0
)
cat_kernels_4_k = torch.cat(
[k_weight, -j_weight, i_weight, r_weight], dim=0
)
cat_kernels_4_quaternion = torch.cat(
[
cat_kernels_4_r,
cat_kernels_4_i,
cat_kernels_4_j,
cat_kernels_4_k,
],
dim=1,
)
if bias.requires_grad:
return torch.addmm(bias, input, cat_kernels_4_quaternion)
else:
return torch.mm(input, cat_kernels_4_quaternion)
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
"""
Run the backward phase of the forward call defined above. This
implementation follows the quaternion backpropagation of a quaternion
layer that can be found in "Quaternion neural networks" - Parcollet T.
Page 48.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed.
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
"""
input, r_weight, i_weight, j_weight, k_weight, bias = ctx.saved_tensors
grad_input = (
grad_weight_r
) = grad_weight_i = grad_weight_j = grad_weight_k = grad_bias = None
input_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
input_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
input_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
input_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
cat_kernels_4_quaternion_T = Variable(
torch.cat([input_r, input_i, input_j, input_k], dim=1).permute(
1, 0
),
requires_grad=False,
)
nb_hidden = input.size()[-1]
r = input.narrow(1, 0, nb_hidden // 4)
i = input.narrow(1, nb_hidden // 4, nb_hidden // 4)
j = input.narrow(1, nb_hidden // 2, nb_hidden // 4)
k = input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
input_r = torch.cat([r, -i, -j, -k], dim=0)
input_i = torch.cat([i, r, -k, j], dim=0)
input_j = torch.cat([j, k, r, -i], dim=0)
input_k = torch.cat([k, -j, i, r], dim=0)
input_mat = Variable(
torch.cat([input_r, input_i, input_j, input_k], dim=1),
requires_grad=False,
)
nb_hidden = grad_output.size()[-1]
r = grad_output.narrow(1, 0, nb_hidden // 4)
i = grad_output.narrow(1, nb_hidden // 4, nb_hidden // 4)
j = grad_output.narrow(1, nb_hidden // 2, nb_hidden // 4)
k = grad_output.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
input_r = torch.cat([r, i, j, k], dim=1)
input_i = torch.cat([-i, r, k, -j], dim=1)
input_j = torch.cat([-j, -k, r, i], dim=1)
input_k = torch.cat([-k, j, -i, r], dim=1)
grad_mat = torch.cat([input_r, input_i, input_j, input_k], dim=0)
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(cat_kernels_4_quaternion_T)
if ctx.needs_input_grad[1]:
grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0)
unit_size_x = r_weight.size(0)
unit_size_y = r_weight.size(1)
grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(
1, 0, unit_size_y
)
grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(
1, unit_size_y, unit_size_y
)
grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(
1, unit_size_y * 2, unit_size_y
)
grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(
1, unit_size_y * 3, unit_size_y
)
if ctx.needs_input_grad[5]:
grad_bias = grad_output.sum(0).squeeze(0)
return (
grad_input,
grad_weight_r,
grad_weight_i,
grad_weight_j,
grad_weight_k,
grad_bias,
)
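# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# The custom autograd Function above trades speed for memory: its forward
# pass builds the same real-valued block matrix as ``quaternion_linear_op``
# (defined just below), so the two should produce identical outputs, while
# the hand-written backward recomputes what it needs instead of relying on
# autograd. The check below is a hypothetical helper; one could also run
# ``torch.autograd.gradcheck`` on double-precision inputs to compare the
# gradients numerically.
def _demo_custom_backward_equivalence():
    torch.manual_seed(0)
    x = torch.randn(3, 8)   # two quaternion inputs per example
    r = torch.randn(2, 2)   # (in_quaternions, out_quaternions) per component
    i = torch.randn(2, 2)
    j = torch.randn(2, 2)
    k = torch.randn(2, 2)
    b = torch.zeros(8)      # 4 * out_quaternions real values
    out_custom = QuaternionLinearCustomBackward.apply(x, r, i, j, k, b)
    out_ref = quaternion_linear_op(x, r, i, j, k, b)
    return torch.allclose(out_custom, out_ref)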
def quaternion_linear_op(input, r_weight, i_weight, j_weight, k_weight, bias):
"""
Applies a quaternion linear transformation to the incoming data:
It is important to notice that the forward phase of a QNN is defined
as W * Inputs (with * equal to the Hamilton product). The constructed
cat_kernels_4_quaternion is a modified version of the quaternion
representation so when we do torch.mm(Input,W) it's equivalent
to W * Inputs.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed.
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
"""
cat_kernels_4_r = torch.cat(
[r_weight, -i_weight, -j_weight, -k_weight], dim=0
)
cat_kernels_4_i = torch.cat(
[i_weight, r_weight, -k_weight, j_weight], dim=0
)
cat_kernels_4_j = torch.cat(
[j_weight, k_weight, r_weight, -i_weight], dim=0
)
cat_kernels_4_k = torch.cat(
[k_weight, -j_weight, i_weight, r_weight], dim=0
)
cat_kernels_4_quaternion = torch.cat(
[cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k],
dim=1,
)
# If the input is already [batch*time, N]
if input.dim() == 2:
if bias.requires_grad:
return torch.addmm(bias, input, cat_kernels_4_quaternion)
else:
return torch.mm(input, cat_kernels_4_quaternion)
else:
output = torch.matmul(input, cat_kernels_4_quaternion)
if bias.requires_grad:
return output + bias
else:
return output
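# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# A sanity check (hypothetical helper) that the block matrix built above
# really implements the Hamilton product W * x for a single quaternion
# weight w = r + s.i + t.j + u.k and a single quaternion input
# x = a + b.i + c.j + d.k.
def _demo_hamilton_product():
    a, b, c, d = 1.0, 2.0, 3.0, 4.0
    r, s, t, u = 0.5, -1.0, 0.25, 2.0
    x = torch.tensor([[a, b, c, d]])
    bias = torch.zeros(4)  # requires_grad is False, so no bias is added
    out = quaternion_linear_op(
        x,
        torch.tensor([[r]]),
        torch.tensor([[s]]),
        torch.tensor([[t]]),
        torch.tensor([[u]]),
        bias,
    )
    # Hamilton product w * x written out component by component.
    expected = torch.tensor(
        [
            [
                r * a - s * b - t * c - u * d,
                r * b + s * a + t * d - u * c,
                r * c - s * d + t * a + u * b,
                r * d + s * c - t * b + u * a,
            ]
        ]
    )
    return torch.allclose(out, expected)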
def quaternion_linear_rotation_op(
input, r_weight, i_weight, j_weight, k_weight, bias, scale, zero_kernel
):
"""
Applies a quaternion rotation transformation to the incoming data:
The rotation W*x*W^t can be replaced by R*x following:
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
Works for unitary and non-unitary weights (they will be normalized).
The initial size of the input must be a multiple of 4 with the real part
equal to zero. Rotations only affect the vector part of a quaternion.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed.
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
scale : torch.Parameter
In the context of a spinor neural network, multiple rotations of
the input vector x are performed and summed. Hence, the norm of
the output vector always increases with the number of layers, making
        the neural network unstable with deep configurations. The scale
        parameters are learnable parameters that act like gates by multiplying
the output vector with a small trainable parameter.
zero_kernel : torch.Parameter
        The zero kernel is simply a tensor of zeros with requires_grad = False.
Its shape is equivalent to a quaternion component shape. In fact,
it is only needed to make the dimensions match when using the rotation
matrix : https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
"""
# First we normalise the quaternion weights. Only unit quaternions are
# valid rotations.
square_r = r_weight * r_weight
square_i = i_weight * i_weight
square_j = j_weight * j_weight
square_k = k_weight * k_weight
norm = torch.sqrt(square_r + square_i + square_j + square_k) + 0.0001
r_n_weight = r_weight / norm
i_n_weight = i_weight / norm
j_n_weight = j_weight / norm
k_n_weight = k_weight / norm
# See https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation for
# the rest of the equations.
norm_factor = 2.0
square_i = norm_factor * (i_n_weight * i_n_weight)
square_j = norm_factor * (j_n_weight * j_n_weight)
square_k = norm_factor * (k_n_weight * k_n_weight)
ri = norm_factor * r_n_weight * i_n_weight
rj = norm_factor * r_n_weight * j_n_weight
rk = norm_factor * r_n_weight * k_n_weight
ij = norm_factor * i_n_weight * j_n_weight
ik = norm_factor * i_n_weight * k_n_weight
jk = norm_factor * j_n_weight * k_n_weight
if scale.requires_grad:
rot_kernel_1 = torch.cat(
[
zero_kernel,
scale * (1.0 - (square_j + square_k)),
scale * (ij - rk),
scale * (ik + rj),
],
dim=1,
)
rot_kernel_2 = torch.cat(
[
zero_kernel,
scale * (ij + rk),
scale * (1.0 - (square_i + square_k)),
scale * (jk - ri),
],
dim=1,
)
rot_kernel_3 = torch.cat(
[
zero_kernel,
scale * (ik - rj),
scale * (jk + ri),
scale * (1.0 - (square_i + square_j)),
],
dim=1,
)
else:
rot_kernel_1 = torch.cat(
[zero_kernel, (1.0 - (square_j + square_k)), (ij - rk), (ik + rj)],
dim=1,
)
rot_kernel_2 = torch.cat(
[zero_kernel, (ij + rk), (1.0 - (square_i + square_k)), (jk - ri)],
dim=1,
)
rot_kernel_3 = torch.cat(
[zero_kernel, (ik - rj), (jk + ri), (1.0 - (square_i + square_j))],
dim=1,
)
zero_kernel2 = torch.cat(
[zero_kernel, zero_kernel, zero_kernel, zero_kernel], dim=1
)
global_rot_kernel = torch.cat(
[zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3], dim=0
)
if input.dim() == 2:
if bias.requires_grad:
return torch.addmm(bias, input, global_rot_kernel)
else:
return torch.mm(input, global_rot_kernel)
else:
output = torch.matmul(input, global_rot_kernel)
if bias.requires_grad:
return output + bias
else:
return output
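# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# A quaternion rotation should preserve the norm of the (purely imaginary)
# input vector. The hypothetical helper below rotates a single pure
# quaternion with a random, non-unit weight: the weight is normalized
# internally, so input and output norms should match up to the small
# regularization constant added to the norm.
def _demo_rotation_preserves_norm():
    torch.manual_seed(0)
    r = torch.randn(1, 1)
    i = torch.randn(1, 1)
    j = torch.randn(1, 1)
    k = torch.randn(1, 1)
    zero_kernel = torch.zeros(1, 1)
    bias = torch.zeros(4)      # requires_grad is False, so no bias is added
    scale = torch.zeros(1, 1)  # requires_grad is False, so scaling is skipped
    x = torch.tensor([[0.0, 1.0, 2.0, 3.0]])  # real part must be zero
    y = quaternion_linear_rotation_op(x, r, i, j, k, bias, scale, zero_kernel)
    # torch.norm(y) should be close to torch.norm(x).
    return torch.norm(x), torch.norm(y)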
def quaternion_conv_rotation_op(
input,
r_weight,
i_weight,
j_weight,
k_weight,
bias,
scale,
zero_kernel,
stride: int,
padding: int,
groups: int,
dilation: int,
conv1d: bool,
):
"""
Applies a quaternion rotation transformation to the incoming data:
The rotation W*x*W^t can be replaced by R*x following:
https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
Works for unitary and non-unitary weights (they will be normalized).
The initial size of the input must be a multiple of 4 with the real part
equal to zero. Rotations only affect the vector part of a quaternion.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed.
conv1d : bool
If true, a 1D convolution operation will be applied. Otherwise, a 2D
convolution is called.
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
scale : torch.Parameter
In the context of a spinor neural network, multiple rotations of
the input vector x are performed and summed. Hence, the norm of
the output vector always increases with the number of layers, making
        the neural network unstable with deep configurations. The scale
        parameters are learnable parameters that act like gates by multiplying
the output vector with a small trainable parameter.
zero_kernel : torch.Parameter
        The zero kernel is simply a tensor of zeros with requires_grad = False.
Its shape is equivalent to a quaternion component shape. In fact,
it is only needed to make the dimensions match when using the rotation
matrix : https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
"""
square_r = r_weight * r_weight
square_i = i_weight * i_weight
square_j = j_weight * j_weight
square_k = k_weight * k_weight
norm = torch.sqrt(square_r + square_i + square_j + square_k + 0.0001)
r_n_weight = r_weight / norm
i_n_weight = i_weight / norm
j_n_weight = j_weight / norm
k_n_weight = k_weight / norm
norm_factor = 2.0
square_i = norm_factor * (i_n_weight * i_n_weight)
square_j = norm_factor * (j_n_weight * j_n_weight)
square_k = norm_factor * (k_n_weight * k_n_weight)
ri = norm_factor * r_n_weight * i_n_weight
rj = norm_factor * r_n_weight * j_n_weight
rk = norm_factor * r_n_weight * k_n_weight
ij = norm_factor * i_n_weight * j_n_weight
ik = norm_factor * i_n_weight * k_n_weight
jk = norm_factor * j_n_weight * k_n_weight
if scale.requires_grad:
rot_kernel_1 = torch.cat(
[
zero_kernel,
scale * (1.0 - (square_j + square_k)),
scale * (ij - rk),
scale * (ik + rj),
],
dim=1,
)
rot_kernel_2 = torch.cat(
[
zero_kernel,
scale * (ij + rk),
scale * (1.0 - (square_i + square_k)),
scale * (jk - ri),
],
dim=1,
)
rot_kernel_3 = torch.cat(
[
zero_kernel,
scale * (ik - rj),
scale * (jk + ri),
scale * (1.0 - (square_i + square_j)),
],
dim=1,
)
else:
rot_kernel_1 = torch.cat(
[zero_kernel, (1.0 - (square_j + square_k)), (ij - rk), (ik + rj)],
dim=1,
)
rot_kernel_2 = torch.cat(
[zero_kernel, (ij + rk), (1.0 - (square_i + square_k)), (jk - ri)],
dim=1,
)
rot_kernel_3 = torch.cat(
[zero_kernel, (ik - rj), (jk + ri), (1.0 - (square_i + square_j))],
dim=1,
)
zero_kernel2 = torch.cat(
[zero_kernel, zero_kernel, zero_kernel, zero_kernel], dim=1
)
global_rot_kernel = torch.cat(
[zero_kernel2, rot_kernel_1, rot_kernel_2, rot_kernel_3], dim=0
)
if conv1d:
return F.conv1d(
input=input,
weight=global_rot_kernel,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
else:
return F.conv2d(
input=input,
weight=global_rot_kernel,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
def quaternion_conv_op(
input,
r_weight,
i_weight,
j_weight,
k_weight,
bias,
stride: int,
padding: int,
groups: int,
dilation: int,
conv1d: bool,
):
"""
Applies a quaternion convolution transformation to the incoming data:
It is important to notice that the forward phase of a QCNN is defined
as W * Inputs (with * equal to the Hamilton product). The constructed
cat_kernels_4_quaternion is a modified version of the quaternion
representation so when we do torch.mm(Input,W) it's equivalent
to W * Inputs.
Arguments
---------
input : torch.Tensor
Quaternion input tensor to be transformed.
conv1d : bool
If true, a 1D convolution operation will be applied. Otherwise, a 2D
convolution is called.
r_weight : torch.Parameter
Real part of the quaternion weight matrix of this layer.
i_weight : torch.Parameter
First imaginary part of the quaternion weight matrix of this layer.
j_weight : torch.Parameter
Second imaginary part of the quaternion weight matrix of this layer.
k_weight : torch.Parameter
Third imaginary part of the quaternion weight matrix of this layer.
bias : torch.Parameter
stride : int
Stride factor of the convolutional filters.
padding : int
Amount of padding. See torch.nn documentation for more information.
groups : int
This option specifies the convolutional groups. See torch.nn
documentation for more information.
dilation : int
Dilation factor of the convolutional filters.
"""
cat_kernels_4_r = torch.cat(
[r_weight, -i_weight, -j_weight, -k_weight], dim=1
)
cat_kernels_4_i = torch.cat(
[i_weight, r_weight, -k_weight, j_weight], dim=1
)
cat_kernels_4_j = torch.cat(
[j_weight, k_weight, r_weight, -i_weight], dim=1
)
cat_kernels_4_k = torch.cat(
[k_weight, -j_weight, i_weight, r_weight], dim=1
)
cat_kernels_4_quaternion = torch.cat(
[cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k],
dim=0,
)
if conv1d:
return F.conv1d(
input=input,
weight=cat_kernels_4_quaternion,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
else:
return F.conv2d(
input=input,
weight=cat_kernels_4_quaternion,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
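# --- Illustrative sketch (not part of the original SpeechBrain source). ---
# ``quaternion_conv_op`` can also be called directly on (batch, channel,
# time) tensors once the four weight components are given the usual
# ``(out_channels, in_channels, kernel_size)`` layout. The helper below is
# hypothetical; shapes are in quaternion units, so 3 input and 2 output
# quaternion channels correspond to 12 and 8 real channels.
def _demo_quaternion_conv_op():
    torch.manual_seed(0)
    r_w = torch.randn(2, 3, 3)
    i_w = torch.randn(2, 3, 3)
    j_w = torch.randn(2, 3, 3)
    k_w = torch.randn(2, 3, 3)
    b = torch.zeros(8)           # 4 * out_channels
    x = torch.randn(5, 12, 50)   # (batch, 4 * in_channels, time)
    y = quaternion_conv_op(
        x, r_w, i_w, j_w, k_w, b,
        stride=1, padding=1, groups=1, dilation=1, conv1d=True,
    )
    # y.shape is expected to be torch.Size([5, 8, 50]).
    return y.shape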
def quaternion_init(
in_features, out_features, kernel_size=None, criterion="glorot"
):
"""Returns a matrix of quaternion numbers initialized with the method
described in "Quaternion Recurrent Neural Network " - Parcollt T.
Arguments
---------
in_features : int
Number of real values of the input layer (quaternion // 4).
out_features : int
Number of real values of the output layer (quaternion // 4).
kernel_size : int
Kernel_size for convolutional layers (ex: (3,3)).
criterion : str
(glorot, he)
"""
# We set the numpy seed equal to the torch seed for reproducibility
    # Indeed we use numpy and scipy here. We need % (2**31-1) because, if the
    # seed hasn't been set by the user in the YAML file, torch may generate
    # a seed that would be too big for numpy.
np.random.seed(seed=torch.initial_seed() % (2 ** 31 - 1))
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
fan_in = in_features * receptive_field
fan_out = out_features * receptive_field
else:
fan_in = in_features
fan_out = out_features
if criterion == "glorot":
s = 1.0 / np.sqrt(2 * (fan_in + fan_out))
else:
s = 1.0 / np.sqrt(2 * fan_in)
# Generating randoms and purely imaginary quaternions :
if kernel_size is None:
kernel_shape = (in_features, out_features)
else:
if type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
modulus = torch.from_numpy(chi.rvs(4, loc=0, scale=s, size=kernel_shape))
number_of_weights = np.prod(kernel_shape)
v_i = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
v_j = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
v_k = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
# Purely imaginary quaternions unitary
for i in range(0, number_of_weights):
norm = torch.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
phase = torch.rand(kernel_shape).uniform_(-math.pi, math.pi)
weight_r = modulus * torch.cos(phase)
weight_i = modulus * v_i * torch.sin(phase)
weight_j = modulus * v_j * torch.sin(phase)
weight_k = modulus * v_k * torch.sin(phase)
return (weight_r, weight_i, weight_j, weight_k)
def unitary_init(in_features, out_features, kernel_size=None, criterion="he"):
"""Returns a matrix of unitary quaternion numbers.
Arguments
---------
in_features : int
Number of real values of the input layer (quaternion // 4).
out_features : int
Number of real values of the output layer (quaternion // 4).
kernel_size : int
Kernel_size for convolutional layers (ex: (3,3)).
criterion : str
(glorot, he)
"""
if kernel_size is None:
kernel_shape = (in_features, out_features)
else:
if type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
number_of_weights = np.prod(kernel_shape)
v_r = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
v_i = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
v_j = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
v_k = torch.FloatTensor(number_of_weights).uniform_(-1, 1)
# Unitary quaternion
for i in range(0, number_of_weights):
norm = (
torch.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2)
+ 0.0001
)
v_r[i] /= norm
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_r = v_r.reshape(kernel_shape)
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
return (v_r, v_i, v_j, v_k)
def affect_init(
r_weight, i_weight, j_weight, k_weight, init_func, init_criterion
):
"""Applies the weight initialization function given to the parameters.
Arguments
---------
r_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
i_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
j_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
k_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
init_func : function
(unitary_init, quaternion_init)
init_criterion : str
(glorot, he)
"""
r, i, j, k = init_func(
r_weight.size(0), r_weight.size(1), None, init_criterion,
)
r_weight.data = r.type_as(r_weight.data)
i_weight.data = i.type_as(i_weight.data)
j_weight.data = j.type_as(j_weight.data)
k_weight.data = k.type_as(k_weight.data)
def affect_conv_init(
r_weight,
i_weight,
j_weight,
k_weight,
kernel_size,
init_func,
init_criterion,
):
""" Applies the weight initialization function given to the parameters.
This is specifically written for convolutional layers.
Arguments
---------
r_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
i_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
j_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
k_weight : torch.Parameters
(nb_quaternion_in, nb_quaternion_out)
kernel_size : int
Kernel size.
init_func : function
(unitary_init, quaternion_init)
init_criterion : str
(glorot, he)
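    Example
    -------
    A minimal sketch (the 8 x 2 x 3 convolution weight is illustrative):
    >>> r_w = torch.nn.Parameter(torch.empty(8, 2, 3))
    >>> i_w = torch.nn.Parameter(torch.empty(8, 2, 3))
    >>> j_w = torch.nn.Parameter(torch.empty(8, 2, 3))
    >>> k_w = torch.nn.Parameter(torch.empty(8, 2, 3))
    >>> affect_conv_init(r_w, i_w, j_w, k_w, 3, unitary_init, "glorot")
    >>> r_w.shape
    torch.Size([8, 2, 3])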
"""
in_channels = r_weight.size(1)
out_channels = r_weight.size(0)
r, i, j, k = init_func(
in_channels,
out_channels,
kernel_size=kernel_size,
criterion=init_criterion,
)
r_weight.data = r.type_as(r_weight.data)
i_weight.data = i.type_as(i_weight.data)
j_weight.data = j.type_as(j_weight.data)
k_weight.data = k.type_as(k_weight.data)
def check_quaternion_input(input_shape):
"""Check the quaternion-valued shape for a linear layer.
Arguments
---------
input_shape : tuple
Expected shape of the input.
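    Example
    -------
    A minimal sketch: a valid quaternion shape passes silently.
    >>> check_quaternion_input([8, 40])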
"""
    if len(input_shape) not in {1, 2, 3}:
        raise Exception(
            "Quaternion linear accepts only input of dimension 1, 2 or 3."
            " input_shape = " + str(input_shape)
        )
    nb_hidden = input_shape[-1]
    if nb_hidden % 4 != 0:
        raise Exception(
            "Quaternion Tensors must have dimensions divisible by 4."
            " input_shape[-1] = " + str(nb_hidden)
        )
| 28,142 | 33.362637 | 80 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quaternion_networks/q_normalization.py | """Library implementing quaternion-valued normalization.
Authors
* Titouan Parcollet 2020
"""
import torch
from torch.nn import Parameter
class QBatchNorm(torch.nn.Module):
"""This class implements the simplest form of a quaternion batchnorm as
described in : "Quaternion Convolutional Neural Network for
Color Image Classification and Forensics", Qilin Y. et al.
Arguments
---------
input_size : int
Expected size of the dimension to be normalized.
dim : int, optional
        It defines the axis that should be normalized. It usually corresponds to
the channel dimension (default -1).
gamma_init : float, optional
First value of gamma to be used (mean) (default 1.0).
beta_param : bool, optional
When set to True the beta parameter of the BN is applied (default True).
momentum : float, optional
It defines the momentum as for the real-valued batch-normalization (default 0.1).
eps : float, optional
Term used to stabilize operation (default 1e-4).
track_running_stats : bool, optional
Equivalent to the real-valued batchnormalization parameter.
        When True, running stats are tracked. When False, only the statistics
        computed over the current batch are used (default True).
Example
-------
>>> inp_tensor = torch.rand([10, 40])
>>> QBN = QBatchNorm(input_size=40)
>>> out_tensor = QBN(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 40])
"""
def __init__(
self,
input_size,
dim=-1,
gamma_init=1.0,
beta_param=True,
momentum=0.1,
eps=1e-4,
track_running_stats=True,
):
super(QBatchNorm, self).__init__()
self.num_features = input_size // 4
self.gamma_init = gamma_init
self.beta_param = beta_param
self.momentum = momentum
self.dim = dim
self.eps = eps
self.track_running_stats = track_running_stats
self.gamma = Parameter(torch.full([self.num_features], self.gamma_init))
self.beta = Parameter(
torch.zeros(self.num_features * 4), requires_grad=self.beta_param
)
# instantiate moving statistics
if track_running_stats:
self.register_buffer(
"running_mean", torch.zeros(self.num_features * 4)
)
self.register_buffer("running_var", torch.ones(self.num_features))
self.register_buffer(
"num_batches_tracked", torch.tensor(0, dtype=torch.long)
)
else:
self.register_parameter("running_mean", None)
self.register_parameter("running_var", None)
self.register_parameter("num_batches_tracked", None)
def forward(self, input):
"""Returns the normalized input tensor.
Arguments
---------
input : torch.Tensor (batch, time, [channels])
Input to normalize. It can be 2d, 3d, 4d.
"""
exponential_average_factor = 0.0
# Entering training mode
if self.training:
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = (
1.0 / self.num_batches_tracked.item()
)
else: # use exponential moving average
exponential_average_factor = self.momentum
# Get mean along batch axis
mu = torch.mean(input, dim=0)
mu_r, mu_i, mu_j, mu_k = torch.chunk(mu, 4, dim=self.dim)
# Get variance along batch axis
delta = input - mu
delta_r, delta_i, delta_j, delta_k = torch.chunk(
delta, 4, dim=self.dim
)
quat_variance = torch.mean(
(delta_r ** 2 + delta_i ** 2 + delta_j ** 2 + delta_k ** 2),
dim=0,
)
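            # quat_variance is the variance of each quaternion neuron seen as a
            # 4-dimensional vector, i.e. the batch mean of |q - mu|^2.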
denominator = torch.sqrt(quat_variance + self.eps)
            # (x - mu) / sqrt(var + e)
out = input / torch.cat(
[denominator, denominator, denominator, denominator],
dim=self.dim,
)
# Update the running stats
if self.track_running_stats:
self.running_mean = (
1 - exponential_average_factor
) * self.running_mean + exponential_average_factor * mu.view(
self.running_mean.size()
)
self.running_var = (
1 - exponential_average_factor
) * self.running_var + exponential_average_factor * quat_variance.view(
self.running_var.size()
)
else:
q_var = torch.cat(
[
self.running_var,
self.running_var,
self.running_var,
self.running_var,
],
dim=self.dim,
)
out = (input - self.running_mean) / q_var
        # gamma * ((x - mu) / sqrt(var + e)) + beta
q_gamma = torch.cat(
[self.gamma, self.gamma, self.gamma, self.gamma], dim=self.dim
)
out = (q_gamma * out) + self.beta
return out
| 5,396 | 31.908537 | 89 | py |
speechbrain | speechbrain-main/speechbrain/nnet/quaternion_networks/q_linear.py | """Library implementing quaternion-valued linear transformation.
Authors
* Titouan Parcollet 2020
"""
import torch
import logging
from speechbrain.nnet.quaternion_networks.q_ops import (
affect_init,
unitary_init,
quaternion_init,
quaternion_linear_op,
check_quaternion_input,
quaternion_linear_rotation_op,
QuaternionLinearCustomBackward,
)
logger = logging.getLogger(__name__)
class QLinear(torch.nn.Module):
"""This function implements a fully connected quaternion-valued
linear layer: y = Wx + b. y, W, x and b are thus quaternion
numbers. A quaternion number is written as: r + xi + yj + zk.
A tensor of quaternion numbers x = [batch, 32] can be understood as
    [batch, 0:7] = R, [batch, 8:15] = Xi, [batch, 16:23] = Yj, and
    [batch, 24:31] = Zk. Thus the features dimension is cut in four
(must be divisible by 4).
Arguments
---------
n_neurons : int
It is the number of output neurons (i.e, the dimensionality of the
output). Please note that these are quaternion-valued neurons. If 256
neurons are specified, the output dimension will be 1024.
input_shape : tuple
Expected size of the input.
bias : bool
If True, the additive bias b is adopted.
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the quaternion-valued weights (default "glorot").
weight_init : str, optional
(quaternion, unitary).
This parameter defines the initialization procedure of the
quaternion-valued weights. "quaternion" will generate quaternion-valued
weights following the init_criterion and the quaternion polar form.
"unitary" will normalize the weights to lie on the unit circle (default "quaternion").
More details in: "Quaternion recurrent neural networks", Parcollet T.
autograd : bool, optional
When True, the default PyTorch autograd will be used. When False, a
custom backpropagation will be used, reducing by a factor 3 to 4 the
memory consumption. It is also 2x slower. This only works with
spinor = False (default True).
spinor : bool, optional
When True, the layer will be turned into a spinor layer. More precisely
        W*x will be turned into W*x*W^-1. The input x will be rotated by W such
as in a spinor neural network. However, x MUST be a quaternion with
the real part equal to zero. (0 + xi + yj + zk). Indeed, the rotation
operation only acts on the vector part. Note that W will always be
normalized before the rotation to ensure the quaternion algebra (default False).
More details in: "Quaternion neural networks", Parcollet T.
vector_scale : bool, optional
The vector_scale is only used when spinor = True. In the context of a
spinor neural network, multiple rotations of the input vector x are
performed and summed. Hence, the norm of the output vector always
        increases with the number of layers, making the neural network unstable
        with deep configurations. The vector_scale parameters are learnable
        parameters that act like gates by multiplying the output vector with
a small trainable parameter (default False).
Example
-------
>>> inputs = torch.rand(10, 50, 40)
>>> lin = QLinear(n_neurons=100, input_shape=inputs.shape, weight_init='unitary')
>>> output = lin(inputs)
>>> output.shape
torch.Size([10, 50, 400])
"""
def __init__(
self,
n_neurons,
input_shape,
bias=True,
init_criterion="glorot",
weight_init="quaternion",
autograd=True,
spinor=False,
vector_scale=False,
):
super().__init__()
self.n_neurons = n_neurons
self.bias = bias
self.init_criterion = init_criterion
self.weight_init = weight_init
self.autograd = autograd
self.spinor = spinor
self.vector_scale = vector_scale
        # When initializing with speechbrain, the input_shape is an integer!
        # We need to transform it into a list so it works with all the quaternion ops.
if isinstance(input_shape, int):
input_shape = [1, input_shape]
# Check the quaternion_valued form of the input
check_quaternion_input(input_shape)
# Computing the quaternion dimensionality of the input
self.in_features = input_shape[-1] // 4
self.out_features = self.n_neurons
# Defining the weights
self.r_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
self.i_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
self.j_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
self.k_weight = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
# Spinor specific parameters
if self.spinor:
self.zero_kernel = torch.nn.Parameter(
torch.zeros(self.r_weight.shape), requires_grad=False
)
else:
self.zero_kernel = torch.Tensor(self.r_weight.shape).requires_grad_(
False
)
if self.spinor and self.vector_scale:
self.scale_param = torch.nn.Parameter(
torch.Tensor(self.in_features, self.out_features)
)
torch.nn.init.xavier_uniform_(self.scale_param.data)
else:
self.scale_param = torch.Tensor(
self.in_features, self.out_features
).requires_grad_(False)
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(4 * n_neurons))
self.b.data.fill_(0)
else:
self.b = torch.Tensor(4 * n_neurons).requires_grad_(False)
# Managing the weight initialization and bias
self.winit = {"quaternion": quaternion_init, "unitary": unitary_init}[
self.weight_init
]
# Initialise the weights
affect_init(
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.winit,
init_criterion,
)
@torch.jit.ignore
def forward(self, x):
"""Returns the linear transformation of input tensor.
Arguments
---------
x : torch.Tensor
Input to transform linearly.
"""
if self.autograd:
if self.spinor:
out = quaternion_linear_rotation_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
self.scale_param,
self.zero_kernel,
)
else:
out = quaternion_linear_op(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
)
else:
# The custom backward needs an input with 2D at most!
input_dim = x.dim()
if input_dim == 3:
batch, time, fea = x.size()
x = x.view(batch * time, fea)
out = QuaternionLinearCustomBackward.apply(
x,
self.r_weight,
self.i_weight,
self.j_weight,
self.k_weight,
self.b,
)
if input_dim == 3:
out = out.view(batch, time, out.size(-1))
return out
| 7,965 | 34.721973 | 94 | py |
speechbrain | speechbrain-main/speechbrain/nnet/loss/transducer_loss.py | """
Transducer loss implementation (depends on numba)
Authors
* Abdelwahab Heba 2020
"""
import torch
from torch.autograd import Function
from torch.nn import Module
try:
from numba import cuda
except ImportError:
err_msg = "The optional dependency Numba is needed to use this module\n"
err_msg += "Cannot import numba. To use Transducer loss\n"
err_msg += "Please follow the instructions below\n"
err_msg += "=============================\n"
err_msg += "If you use your localhost:\n"
err_msg += "pip install numba\n"
err_msg += "export NUMBAPRO_LIBDEVICE='/usr/local/cuda/nvvm/libdevice/' \n"
err_msg += "export NUMBAPRO_NVVM='/usr/local/cuda/nvvm/lib64/libnvvm.so' \n"
err_msg += "================================ \n"
err_msg += "If you use conda:\n"
err_msg += "conda install numba cudatoolkit=9.0"
raise ImportError(err_msg)
import math
@cuda.jit(
"(float32[:,:,:,:], int32[:,:], float32[:,:,:], float32[:], int32[:], int32[:], int32, int32[:,:])"
)
def cu_kernel_forward(log_probs, labels, alpha, log_p, T, U, blank, lock):
"""
Compute forward pass for the forward-backward algorithm using Numba cuda kernel.
Sequence Transduction with naive implementation : https://arxiv.org/pdf/1211.3711.pdf
Arguments
---------
log_probs : tensor
4D Tensor of (batch x TimeLength x LabelLength x outputDim) from the Transducer network.
labels : tensor
2D Tensor of (batch x MaxSeqLabelLength) containing targets of the batch with zero padding.
alpha : tensor
3D Tensor of (batch x TimeLength x LabelLength) for forward computation.
log_p : tensor
1D Tensor of (batch) for forward cost computation.
T : tensor
1D Tensor of (batch) containing TimeLength of each target.
U : tensor
1D Tensor of (batch) containing LabelLength of each target.
blank : int
        Blank index.
lock : tensor
2D Tensor of (batch x LabelLength) containing bool(1-0) lock for parallel computation.
"""
# parallelize the forward algorithm over batch and target length dim
b = cuda.blockIdx.x
u = cuda.threadIdx.x
t = 0
if u <= U[b]:
# for each (B,U) Thread
# wait the unlock of the previous computation of Alpha[b,U-1,:]
# Do the computation over the whole Time sequence on alpha[B,U,:]
# and then unlock the target U+1 for computation
while t < T[b]:
if u == 0:
if t > 0:
alpha[b, t, 0] = (
alpha[b, t - 1, 0] + log_probs[b, t - 1, 0, blank]
)
cuda.atomic.add(lock, (b, u + 1), -1)
t += 1
else:
if cuda.atomic.add(lock, (b, u), 0) < 0:
if t == 0:
alpha[b, 0, u] = (
alpha[b, 0, u - 1]
+ log_probs[b, 0, u - 1, labels[b, u - 1]]
)
else:
# compute emission prob
emit = (
alpha[b, t, u - 1]
+ log_probs[b, t, u - 1, labels[b, u - 1]]
)
# compute no_emission prob
no_emit = (
alpha[b, t - 1, u] + log_probs[b, t - 1, u, blank]
)
# do logsumexp between log_emit and log_no_emit
alpha[b, t, u] = max(no_emit, emit) + math.log1p(
math.exp(-abs(no_emit - emit))
)
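                        # logsumexp(a, b) = max(a, b) + log1p(exp(-|a - b|)),
                        # e.g. logsumexp(log(0.2), log(0.3)) == log(0.5),
                        # computed in a numerically stable way.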
if u < U[b]:
cuda.atomic.add(lock, (b, u + 1), -1)
cuda.atomic.add(lock, (b, u), 1)
t += 1
if u == U[b]:
# for each thread b (utterance)
# normalize the loss over time
log_p[b] = (
alpha[b, T[b] - 1, U[b]] + log_probs[b, T[b] - 1, U[b], blank]
) / T[b]
@cuda.jit(
"(float32[:,:,:,:], int32[:,:], float32[:,:,:], float32[:], int32[:], int32[:], int32, int32[:,:])"
)
def cu_kernel_backward(log_probs, labels, beta, log_p, T, U, blank, lock):
"""
Compute backward pass for the forward-backward algorithm using Numba cuda kernel.
Sequence Transduction with naive implementation : https://arxiv.org/pdf/1211.3711.pdf
Arguments
---------
log_probs : tensor
4D Tensor of (batch x TimeLength x LabelLength x outputDim) from the Transducer network.
labels : tensor
2D Tensor of (batch x MaxSeqLabelLength) containing targets of the batch with zero padding.
beta : tensor
3D Tensor of (batch x TimeLength x LabelLength) for backward computation.
log_p : tensor
1D Tensor of (batch) for backward cost computation.
T : tensor
1D Tensor of (batch) containing TimeLength of each target.
U : tensor
1D Tensor of (batch) containing LabelLength of each target.
blank : int
Blank indice.
lock : tensor
2D Tensor of (batch x LabelLength) containing bool(1-0) lock for parallel computation.
"""
# parallelize the forward algorithm over batch and target length dim
b = cuda.blockIdx.x
u = cuda.threadIdx.x
t = T[b] - 1
if u <= U[b]:
# for each (B,U) Thread
# wait the unlock of the next computation of beta[b,U+1,:]
# Do the computation over the whole Time sequence on beta[B,U,:]
# and then unlock the target U-1 for computation
while t >= 0:
if u == U[b]:
if t == T[b] - 1:
beta[b, t, u] = log_probs[b, t, u, blank]
else:
beta[b, t, u] = (
beta[b, t + 1, u] + log_probs[b, t, u, blank]
)
cuda.atomic.add(lock, (b, u - 1), -1)
t -= 1
else:
if cuda.atomic.add(lock, (b, u), 0) < 0:
if t == T[b] - 1:
# do logsumexp between log_emit and log_no_emit
beta[b, t, u] = (
beta[b, t, u + 1] + log_probs[b, t, u, labels[b, u]]
)
else:
# compute emission prob
emit = (
beta[b, t, u + 1] + log_probs[b, t, u, labels[b, u]]
)
# compute no_emission prob
no_emit = beta[b, t + 1, u] + log_probs[b, t, u, blank]
# do logsumexp between log_emit and log_no_emit
beta[b, t, u] = max(no_emit, emit) + math.log1p(
math.exp(-abs(no_emit - emit))
)
if u > 0:
cuda.atomic.add(lock, (b, u - 1), -1)
cuda.atomic.add(lock, (b, u), 1)
t -= 1
if u == 0:
# for each thread b (utterance)
# normalize the loss over time
log_p[b] = beta[b, 0, 0] / T[b]
@cuda.jit(
"(float32[:,:,:,:], int32[:,:],float32[:,:,:], float32[:,:,:], float32[:,:,:,:], int32[:], int32[:], int32)"
)
def cu_kernel_compute_grad(log_probs, labels, alpha, beta, grads, T, U, blank):
"""
Compute gradient for the forward-backward algorithm using Numba cuda kernel.
Sequence Transduction with naive implementation : https://arxiv.org/pdf/1211.3711.pdf
Arguments
---------
log_probs : tensor
4D Tensor of (batch x TimeLength x LabelLength x outputDim) from the Transducer network.
labels : tensor
2D Tensor of (batch x MaxSeqLabelLength) containing targets of the batch with zero padding.
    alpha : tensor
        3D Tensor of (batch x TimeLength x LabelLength) from the forward computation.
    beta : tensor
        3D Tensor of (batch x TimeLength x LabelLength) from the backward computation.
    grads : tensor
        4D Tensor of (batch x TimeLength x LabelLength x outputDim) in which the gradients are written.
    T : tensor
        1D Tensor of (batch) containing TimeLength of each target.
    U : tensor
        1D Tensor of (batch) containing LabelLength of each target.
    blank : int
        Blank index.
"""
# parallelize the gradient computation over batch and timeseq length dim
t = cuda.blockIdx.x
b = cuda.threadIdx.x
if t < T[b]:
# compute the gradient for no_emit prob
if t == 0:
grads[b, T[b] - 1, U[b], blank] = -math.exp(
alpha[b, T[b] - 1, U[b]]
+ log_probs[b, T[b] - 1, U[b], blank]
- beta[b, 0, 0]
)
if t < T[b] - 1:
for u in range(U[b] + 1):
grads[b, t, u, blank] = alpha[b, t, u] + beta[b, t + 1, u]
grads[b, t, u, blank] = -math.exp(
grads[b, t, u, blank]
+ log_probs[b, t, u, blank]
- beta[b, 0, 0]
)
# compute the gradient for emit prob
for u, l in enumerate(labels[b]):
if u < U[b]:
grads[b, t, u, l] = alpha[b, t, u] + beta[b, t, u + 1]
grads[b, t, u, l] = -math.exp(
grads[b, t, u, l] + log_probs[b, t, u, l] - beta[b, 0, 0]
)
class Transducer(Function):
"""
This class implements the Transducer loss computation with forward-backward algorithm
Sequence Transduction with naive implementation : https://arxiv.org/pdf/1211.3711.pdf
    This class uses torch.autograd.Function. Since the forward-backward algorithm is used,
    we need to compute the gradient manually.
    This class can't be instantiated directly; please refer to the TransducerLoss class.
It is also possible to use this class directly by using Transducer.apply
"""
@staticmethod
def forward(ctx, log_probs, labels, T, U, blank, reduction):
"""Computes the transducer loss."""
log_probs = log_probs.detach()
B, maxT, maxU, A = log_probs.shape
grads = torch.zeros(
(B, maxT, maxU, A), dtype=torch.float32, device=log_probs.device
)
alpha = torch.zeros((B, maxT, maxU), device=log_probs.device)
beta = torch.zeros((B, maxT, maxU), device=log_probs.device)
lock = torch.zeros(
(B, maxU), dtype=torch.int32, device=log_probs.device
)
log_p_alpha = torch.zeros((B,), device=log_probs.device)
log_p_beta = torch.zeros((B,), device=log_probs.device)
cu_kernel_forward[B, maxU](
log_probs, labels, alpha, log_p_alpha, T, U, blank, lock,
)
lock = lock * 0
cu_kernel_backward[B, maxU](
log_probs, labels, beta, log_p_beta, T, U, blank, lock
)
cu_kernel_compute_grad[maxT, B](
log_probs, labels, alpha, beta, grads, T, U, blank
)
ctx.grads = grads
del alpha, beta, lock, log_p_beta, T, U, log_probs, labels
torch.cuda.empty_cache()
if reduction == "mean":
return -log_p_alpha.mean()
elif reduction == "sum":
return sum(-log_p_alpha)
elif reduction == "none":
return -log_p_alpha
else:
raise Exception("Unexpected reduction {}".format(reduction))
@staticmethod
def backward(ctx, grad_output):
"""Backward computations for the transducer loss."""
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul_(grad_output), None, None, None, None, None, None
class TransducerLoss(Module):
"""
    This class implements the Transducer loss computation with forward-backward algorithm.
    Sequence Transduction with naive implementation : https://arxiv.org/pdf/1211.3711.pdf
    The TransducerLoss(nn.Module) uses Transducer(autograd.Function)
to compute the forward-backward loss and gradients.
Input tensors must be on a cuda device.
Example
-------
>>> import torch
>>> loss = TransducerLoss(blank=0)
>>> logits = torch.randn((1,2,3,5)).cuda().requires_grad_()
>>> labels = torch.Tensor([[1,2]]).cuda().int()
>>> act_length = torch.Tensor([2]).cuda().int()
>>> # U = label_length+1
>>> label_length = torch.Tensor([2]).cuda().int()
>>> l = loss(logits, labels, act_length, label_length)
>>> l.backward()
"""
def __init__(self, blank=0, reduction="mean"):
super(TransducerLoss, self).__init__()
self.blank = blank
self.reduction = reduction
self.loss = Transducer.apply
try:
cuda.cuda_paths
except ImportError:
err_msg = "cannot import numba. To use Transducer loss\n"
err_msg += "=============================\n"
err_msg += "If you use your localhost:\n"
err_msg += "pip install numba\n"
err_msg += (
"export NUMBAPRO_LIBDEVICE='/usr/local/cuda/nvvm/libdevice/' \n"
)
err_msg += "export NUMBAPRO_NVVM='/usr/local/cuda/nvvm/lib64/libnvvm.so' \n"
err_msg += "================================ \n"
err_msg += "If you use conda:\n"
err_msg += "conda install numba cudatoolkit=XX (XX is your cuda toolkit version)"
raise ImportError(err_msg)
def forward(self, logits, labels, T, U):
"""Computes the transducer loss."""
# Transducer.apply function take log_probs tensor.
if logits.device == labels.device == T.device == U.device == "cuda":
log_probs = logits.log_softmax(-1)
return self.loss(
log_probs, labels, T, U, self.blank, self.reduction
)
else:
raise ValueError(
f"Found inputs tensors to be on {[logits.device, labels.device, T.device, U.device]} while needed to be on a 'cuda' device to use the transducer loss."
)
| 14,074 | 38.985795 | 167 | py |
speechbrain | speechbrain-main/speechbrain/nnet/loss/stoi_loss.py | """Library for computing the STOI loss.
Reference: "End-to-End Waveform Utterance Enhancement for Direct Evaluation
Metrics Optimization by Fully Convolutional Neural Networks", TASLP, 2018
Authors:
Szu-Wei, Fu 2020
"""
import torch
import torchaudio
import numpy as np
from speechbrain.utils.torch_audio_backend import check_torchaudio_backend
check_torchaudio_backend()
smallVal = np.finfo("float").eps # To avoid divide by zero
def thirdoct(fs, nfft, num_bands, min_freq):
"""Returns the 1/3 octave band matrix.
Arguments
---------
fs : int
Sampling rate.
nfft : int
FFT size.
num_bands : int
Number of 1/3 octave bands.
min_freq : int
Center frequency of the lowest 1/3 octave band.
Returns
-------
obm : tensor
Octave Band Matrix.
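    Example
    -------
    A minimal sketch using the same settings as stoi_loss below:
    >>> obm = thirdoct(fs=10000, nfft=512, num_bands=15, min_freq=150)
    >>> obm.shape
    torch.Size([15, 257])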
"""
f = torch.linspace(0, fs, nfft + 1)
f = f[: int(nfft / 2) + 1]
k = torch.from_numpy(np.array(range(num_bands)).astype(float))
cf = torch.pow(2.0 ** (1.0 / 3), k) * min_freq
freq_low = min_freq * torch.pow(2.0, (2 * k - 1) / 6)
freq_high = min_freq * torch.pow(2.0, (2 * k + 1) / 6)
    obm = torch.zeros(num_bands, len(f))  # to be verified
for i in range(len(cf)):
# Match 1/3 oct band freq with fft frequency bin
f_bin = torch.argmin(torch.square(f - freq_low[i]))
freq_low[i] = f[f_bin]
fl_ii = f_bin
f_bin = torch.argmin(torch.square(f - freq_high[i]))
freq_high[i] = f[f_bin]
fh_ii = f_bin
# Assign to the octave band matrix
obm[i, fl_ii:fh_ii] = 1
return obm
def removeSilentFrames(x, y, dyn_range=40, N=256, K=128):
"""Removes silent frames from the STOI computation.
    This helper is used inside stoi_loss, which can be used as a loss
    function for training with SGD-based updates.
Arguments
---------
x: torch.Tensor
The clean (reference) waveforms.
y: torch.Tensor
The degraded (enhanced) waveforms.
dyn_range: int
Dynamic range used for mask computation.
N: int
Window length.
K: int
Step size.
"""
w = torch.unsqueeze(torch.from_numpy(np.hanning(N)), 0).to(torch.float)
X1 = x[0 : int(x.shape[0]) // N * N].reshape(int(x.shape[0]) // N, N).T
X2 = (
x[K : (int(x.shape[0]) - K) // N * N + K]
.reshape((int(x.shape[0]) - K) // N, N)
.T
)
X = torch.zeros(N, X1.shape[1] + X2.shape[1])
X[:, 0::2] = X1
X[:, 1::2] = X2
energy = 20 * torch.log10(
torch.sqrt(torch.matmul(w ** 2, X ** 2)) / 16.0 + smallVal
)
Max_energy = torch.max(energy)
msk = torch.squeeze((energy - Max_energy + dyn_range > 0))
Y1 = y[0 : int(y.shape[0]) // N * N].reshape(int(y.shape[0]) // N, N).T
Y2 = (
y[K : (int(y.shape[0]) - K) // N * N + K]
.reshape((int(y.shape[0]) - K) // N, N)
.T
)
Y = torch.zeros(N, Y1.shape[1] + Y2.shape[1])
Y[:, 0::2] = Y1
Y[:, 1::2] = Y2
x_sil = w.T.repeat(1, X[:, msk].shape[-1]) * X[:, msk]
y_sil = w.T.repeat(1, X[:, msk].shape[-1]) * Y[:, msk]
x_sil = torch.cat(
(
x_sil[0:K, 0],
(x_sil[0:K, 1:] + x_sil[K:, 0:-1]).T.flatten(),
x_sil[K:N, -1],
),
axis=0,
)
y_sil = torch.cat(
(
y_sil[0:K, 0],
(y_sil[0:K, 1:] + y_sil[K:, 0:-1]).T.flatten(),
y_sil[K:N, -1],
),
axis=0,
)
return [x_sil, y_sil]
def stoi_loss(y_pred_batch, y_true_batch, lens, reduction="mean"):
"""Compute the STOI score and return -1 * that score.
This function can be used as a loss function for training
with SGD-based updates.
Arguments
---------
y_pred_batch : torch.Tensor
The degraded (enhanced) waveforms.
y_true_batch : torch.Tensor
The clean (reference) waveforms.
lens : torch.Tensor
The relative lengths of the waveforms within the batch.
reduction : str
The type of reduction ("mean" or "batch") to use.
Example
-------
>>> a = torch.sin(torch.arange(16000, dtype=torch.float32)).unsqueeze(0)
>>> b = a + 0.001
>>> -stoi_loss(b, a, torch.ones(1))
tensor(0.7...)
"""
y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)
y_true_batch = torch.squeeze(y_true_batch, dim=-1)
batch_size = y_pred_batch.shape[0]
fs = 16000 # Sampling rate
N = 30 # length of temporal envelope vectors
J = 15.0 # Number of one-third octave bands
octave_band = thirdoct(fs=10000, nfft=512, num_bands=15, min_freq=150)
c = 5.62341325 # 10^(-Beta/20) with Beta = -15
D = torch.zeros(batch_size)
resampler = torchaudio.transforms.Resample(fs, 10000).to(
y_pred_batch.device
)
for i in range(0, batch_size): # Run over mini-batches
y_true = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
y_pred = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
y_true, y_pred = resampler(y_true), resampler(y_pred)
[y_sil_true, y_sil_pred] = removeSilentFrames(y_true, y_pred)
stft_true = torchaudio.transforms.Spectrogram(
n_fft=512, win_length=256, hop_length=128, power=2
)(y_sil_true)
stft_pred = torchaudio.transforms.Spectrogram(
n_fft=512, win_length=256, hop_length=128, power=2
)(y_sil_pred)
OCT_true = torch.sqrt(torch.matmul(octave_band, stft_true) + 1e-14)
OCT_pred = torch.sqrt(torch.matmul(octave_band, stft_pred) + 1e-14)
M = int(
stft_pred.shape[-1] - (N - 1)
) # number of temporal envelope vectors
X = torch.zeros(15 * M, 30)
Y = torch.zeros(15 * M, 30)
for m in range(0, M): # Run over temporal envelope vectors
X[m * 15 : (m + 1) * 15, :] = OCT_true[:, m : m + N]
Y[m * 15 : (m + 1) * 15, :] = OCT_pred[:, m : m + N]
alpha = torch.norm(X, dim=-1, keepdim=True) / (
torch.norm(Y, dim=-1, keepdim=True) + smallVal
)
ay = Y * alpha
y = torch.min(ay, X + X * c)
xn = X - torch.mean(X, dim=-1, keepdim=True)
xn = xn / (torch.norm(xn, dim=-1, keepdim=True) + smallVal)
yn = y - torch.mean(y, dim=-1, keepdim=True)
yn = yn / (torch.norm(yn, dim=-1, keepdim=True) + smallVal)
d = torch.sum(xn * yn)
D[i] = d / (J * M)
if reduction == "mean":
return -D.mean()
return -D
| 6,489 | 28.770642 | 76 | py |
speechbrain | speechbrain-main/speechbrain/nnet/loss/guidedattn_loss.py | """The Guided Attention Loss implementation
This loss can be used to speed up the training of
models in which the correspondence between inputs and
outputs is roughly linear, and the attention alignments
are expected to be approximately diagonal, such as Grapheme-to-Phoneme
and Text-to-Speech
Authors
* Artem Ploujnikov 2021
"""
import torch
from torch import nn
class GuidedAttentionLoss(nn.Module):
"""
A loss implementation that forces attention matrices to be
near-diagonal, imposing progressively larger penalties for paying
    attention to regions far away from the diagonal. It is useful
    for sequence-to-sequence models in which the sequence of outputs
    is expected to correspond closely to the sequence of inputs,
such as TTS or G2P
https://arxiv.org/abs/1710.08969
The implementation is inspired by the R9Y9 DeepVoice3 model
https://github.com/r9y9/deepvoice3_pytorch
It should be roughly equivalent to it; however, it has been
fully vectorized.
Arguments
---------
sigma:
the guided attention weight
Example
-------
NOTE: In a real scenario, the input_lengths and
target_lengths would come from a data batch,
whereas alignments would come from a model
>>> import torch
>>> from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
>>> loss = GuidedAttentionLoss(sigma=0.2)
>>> input_lengths = torch.tensor([2, 3])
>>> target_lengths = torch.tensor([3, 4])
>>> alignments = torch.tensor(
... [
... [
... [0.8, 0.2, 0.0],
... [0.4, 0.6, 0.0],
... [0.2, 0.8, 0.0],
... [0.0, 0.0, 0.0],
... ],
... [
... [0.6, 0.2, 0.2],
... [0.1, 0.7, 0.2],
... [0.3, 0.4, 0.3],
... [0.2, 0.3, 0.5],
... ],
... ]
... )
>>> loss(alignments, input_lengths, target_lengths)
tensor(0.1142)
"""
def __init__(self, sigma=0.2):
super().__init__()
self.sigma = sigma
self.weight_factor = 2 * (sigma ** 2)
def forward(
self,
attention,
input_lengths,
target_lengths,
max_input_len=None,
max_target_len=None,
):
"""
Computes the guided attention loss for a single batch
Arguments
---------
attention: torch.Tensor
A padded attention/alignments matrix
(batch, targets, inputs)
input_lengths: torch.tensor
A (batch, lengths) tensor of input lengths
target_lengths: torch.tensor
A (batch, lengths) tensor of target lengths
max_input_len: int
The maximum input length - optional,
if not computed will be set to the maximum
of target_lengths. Setting it explicitly
might be necessary when using data parallelism
max_target_len: int
The maximum target length - optional,
if not computed will be set to the maximum
of target_lengths. Setting it explicitly
might be necessary when using data parallelism
Returns
-------
loss: torch.Tensor
A single-element tensor with the loss value
"""
soft_mask = self.guided_attentions(
input_lengths, target_lengths, max_input_len, max_target_len
)
return (attention * soft_mask.transpose(-1, -2)).mean()
def guided_attentions(
self,
input_lengths,
target_lengths,
max_input_len=None,
max_target_len=None,
):
"""
Computes guided attention matrices
Arguments
---------
input_lengths: torch.Tensor
A tensor of input lengths
target_lengths: torch.Tensor
A tensor of target lengths
max_input_len: int
The maximum input length - optional,
if not computed will be set to the maximum
of target_lengths. Setting it explicitly
might be necessary when using data parallelism
max_target_len: int
The maximum target length - optional,
if not computed will be set to the maximum
of target_lengths. Setting it explicitly
might be necessary when using data parallelism
Returns
-------
soft_mask: torch.Tensor
The guided attention tensor of shape (batch, max_input_len, max_target_len)
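        Example
        -------
        A minimal sketch (the lengths are illustrative):
        >>> loss = GuidedAttentionLoss(sigma=0.2)
        >>> mask = loss.guided_attentions(torch.tensor([2]), torch.tensor([3]))
        >>> mask.shape
        torch.Size([1, 2, 3])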
"""
input_lengths_broad = input_lengths.view(-1, 1, 1)
target_lengths_broad = target_lengths.view(-1, 1, 1)
if max_input_len is None:
max_input_len = input_lengths.max()
if max_target_len is None:
max_target_len = target_lengths.max()
input_mesh, target_mesh = torch.meshgrid(
torch.arange(max_input_len).to(input_lengths.device),
torch.arange(max_target_len).to(target_lengths.device),
)
input_mesh, target_mesh = (
input_mesh.unsqueeze(0),
target_mesh.unsqueeze(0),
)
input_lengths_broad = input_lengths.view(-1, 1, 1)
target_lengths_broad = target_lengths.view(-1, 1, 1)
soft_mask = 1.0 - torch.exp(
-(
(
input_mesh / input_lengths_broad
- target_mesh / target_lengths_broad
)
** 2
)
/ self.weight_factor
)
outside = (input_mesh >= input_lengths_broad) | (
target_mesh >= target_lengths_broad
)
soft_mask[outside] = 0.0
return soft_mask
| 5,760 | 31.184358 | 87 | py |
speechbrain | speechbrain-main/speechbrain/nnet/loss/si_snr_loss.py | """
# Authors:
* Szu-Wei, Fu 2021
* Mirco Ravanelli 2020
* Samuele Cornell 2020
* Hwidong Na 2020
* Yan Gao 2020
* Titouan Parcollet 2020
"""
import torch
import numpy as np
smallVal = np.finfo("float").eps # To avoid divide by zero
def si_snr_loss(y_pred_batch, y_true_batch, lens, reduction="mean"):
"""Compute the si_snr score and return -1 * that score.
This function can be used as a loss function for training
with SGD-based updates.
Arguments
---------
y_pred_batch : torch.Tensor
The degraded (enhanced) waveforms.
y_true_batch : torch.Tensor
The clean (reference) waveforms.
lens : torch.Tensor
The relative lengths of the waveforms within the batch.
reduction : str
The type of reduction ("mean" or "batch") to use.
Example
-------
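    A minimal sketch (only checks that a scalar loss is returned):
    >>> a = torch.sin(torch.arange(16000, dtype=torch.float32)).unsqueeze(0)
    >>> b = a + 0.001
    >>> loss = si_snr_loss(b, a, torch.ones(1))
    >>> loss.shape
    torch.Size([])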
"""
y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)
y_true_batch = torch.squeeze(y_true_batch, dim=-1)
batch_size = y_pred_batch.shape[0]
SI_SNR = torch.zeros(batch_size)
for i in range(0, batch_size): # Run over mini-batches
s_target = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
s_estimate = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
# s_target = <s', s>s / ||s||^2
dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True)
s_target_energy = (
torch.sum(s_target ** 2, dim=0, keepdim=True) + smallVal
)
proj = dot * s_target / s_target_energy
# e_noise = s' - s_target
e_noise = s_estimate - proj
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (
torch.sum(e_noise ** 2, dim=0) + smallVal
)
SI_SNR[i] = 10 * torch.log10(si_snr_beforelog + smallVal)
if reduction == "mean":
return -SI_SNR.mean()
return -SI_SNR
| 1,912 | 27.132353 | 78 | py |
speechbrain | speechbrain-main/speechbrain/pretrained/interfaces.py | """Defines interfaces for simple inference with pretrained models
Authors:
* Aku Rouhe 2021
* Peter Plantinga 2021
* Loren Lugosch 2020
* Mirco Ravanelli 2020
* Titouan Parcollet 2021
* Abdel Heba 2021
* Andreas Nautsch 2022
 * Pooneh Mousavi 2023
"""
import logging
import hashlib
import sys
import speechbrain
import torch
import torchaudio
import sentencepiece
from types import SimpleNamespace
from torch.nn import SyncBatchNorm
from torch.nn import DataParallel as DP
from hyperpyyaml import load_hyperpyyaml
from speechbrain.pretrained.fetching import fetch
from speechbrain.dataio.preprocess import AudioNormalizer
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from speechbrain.utils.data_utils import split_path
from speechbrain.utils.distributed import run_on_main
from speechbrain.dataio.batch import PaddedBatch, PaddedData
from speechbrain.utils.data_pipeline import DataPipeline
from speechbrain.utils.callchains import lengths_arg_exists
from speechbrain.utils.superpowers import import_from_path
logger = logging.getLogger(__name__)
def foreign_class(
source,
hparams_file="hyperparams.yaml",
pymodule_file="custom.py",
classname="CustomInterface",
overrides={},
savedir=None,
use_auth_token=False,
download_only=False,
**kwargs,
):
"""Fetch and load an interface from an outside source
The source can be a location on the filesystem or online/huggingface
The pymodule file should contain a class with the given classname. An
instance of that class is returned. The idea is to have a custom Pretrained
subclass in the file. The pymodule file is also added to the python path
before the Hyperparams YAML file is loaded, so it can contain any custom
implementations that are needed.
The hyperparams file should contain a "modules" key, which is a
dictionary of torch modules used for computation.
The hyperparams file should contain a "pretrainer" key, which is a
speechbrain.utils.parameter_transfer.Pretrainer
Arguments
---------
source : str
The location to use for finding the model. See
``speechbrain.pretrained.fetching.fetch`` for details.
hparams_file : str
The name of the hyperparameters file to use for constructing
the modules necessary for inference. Must contain two keys:
"modules" and "pretrainer", as described.
pymodule_file : str
The name of the Python file that should be fetched.
classname : str
The name of the Class, of which an instance is created and returned
overrides : dict
Any changes to make to the hparams file when it is loaded.
savedir : str or Path
Where to put the pretraining material. If not given, will use
./pretrained_models/<class-name>-hash(source).
use_auth_token : bool (default: False)
        If true, HuggingFace's auth_token will be used to load private models from the HuggingFace Hub,
default is False because the majority of models are public.
download_only : bool (default: False)
If true, class and instance creation is skipped.
Returns
-------
object
An instance of a class with the given classname from the given pymodule file.
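    Example
    -------
    A minimal sketch of the call pattern (the source below is a placeholder,
    not a real model location):
    >>> interface = foreign_class(
    ...     source="path/or/hub-id/of/a/pretrained/model",
    ...     pymodule_file="custom.py",
    ...     classname="CustomInterface",
    ... )  # doctest: +SKIP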
"""
if savedir is None:
savedir = f"./pretrained_models/{classname}-{hashlib.md5(source.encode('UTF-8', errors='replace')).hexdigest()}"
hparams_local_path = fetch(
filename=hparams_file,
source=source,
savedir=savedir,
overwrite=False,
save_filename=None,
use_auth_token=use_auth_token,
revision=None,
)
pymodule_local_path = fetch(
filename=pymodule_file,
source=source,
savedir=savedir,
overwrite=False,
save_filename=None,
use_auth_token=use_auth_token,
revision=None,
)
sys.path.append(str(pymodule_local_path.parent))
# Load the modules:
with open(hparams_local_path) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Pretraining:
pretrainer = hparams["pretrainer"]
pretrainer.set_collect_in(savedir)
# For distributed setups, have this here:
run_on_main(pretrainer.collect_files, kwargs={"default_source": source})
# Load on the CPU. Later the params can be moved elsewhere by specifying
if not download_only:
# run_opts={"device": ...}
pretrainer.load_collected(device="cpu")
# Import class and create instance
module = import_from_path(pymodule_local_path)
cls = getattr(module, classname)
return cls(modules=hparams["modules"], hparams=hparams, **kwargs)
class Pretrained(torch.nn.Module):
"""Takes a trained model and makes predictions on new data.
This is a base class which handles some common boilerplate.
It intentionally has an interface similar to ``Brain`` - these base
classes handle similar things.
Subclasses of Pretrained should implement the actual logic of how
the pretrained system runs, and add methods with descriptive names
(e.g. transcribe_file() for ASR).
Pretrained is a torch.nn.Module so that methods like .to() or .eval() can
work. Subclasses should provide a suitable forward() implementation: by
convention, it should be a method that takes a batch of audio signals and
runs the full model (as applicable).
Arguments
---------
modules : dict of str:torch.nn.Module pairs
The Torch modules that make up the learned system. These can be treated
in special ways (put on the right device, frozen, etc.). These are available
as attributes under ``self.mods``, like self.mods.model(x)
hparams : dict
Each key:value pair should consist of a string key and a hyperparameter
that is used within the overridden methods. These will
be accessible via an ``hparams`` attribute, using "dot" notation:
e.g., self.hparams.model(x).
run_opts : dict
Options parsed from command line. See ``speechbrain.parse_arguments()``.
List that are supported here:
* device
* data_parallel_count
* data_parallel_backend
* distributed_launch
* distributed_backend
* jit_module_keys
freeze_params : bool
To freeze (requires_grad=False) parameters or not. Normally in inference
you want to freeze the params. Also calls .eval() on all modules.
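    Example
    -------
    A minimal sketch with a single dummy module (not a real pretrained system):
    >>> model = Pretrained(modules={"enc": torch.nn.Linear(4, 4)}, hparams={})
    >>> next(model.mods.enc.parameters()).requires_grad
    False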
"""
HPARAMS_NEEDED = []
MODULES_NEEDED = []
def __init__(
self, modules=None, hparams=None, run_opts=None, freeze_params=True
):
super().__init__()
# Arguments passed via the run opts dictionary. Set a limited
# number of these, since some don't apply to inference.
run_opt_defaults = {
"device": "cpu",
"data_parallel_count": -1,
"data_parallel_backend": False,
"distributed_launch": False,
"distributed_backend": "nccl",
"jit_module_keys": None,
}
for arg, default in run_opt_defaults.items():
if run_opts is not None and arg in run_opts:
setattr(self, arg, run_opts[arg])
else:
# If any arg from run_opt_defaults exist in hparams and
# not in command line args "run_opts"
if hparams is not None and arg in hparams:
setattr(self, arg, hparams[arg])
else:
setattr(self, arg, default)
# Put modules on the right device, accessible with dot notation
self.mods = torch.nn.ModuleDict(modules)
for module in self.mods.values():
if module is not None:
module.to(self.device)
# Check MODULES_NEEDED and HPARAMS_NEEDED and
# make hyperparams available with dot notation
if self.HPARAMS_NEEDED and hparams is None:
raise ValueError("Need to provide hparams dict.")
if hparams is not None:
# Also first check that all required params are found:
for hp in self.HPARAMS_NEEDED:
if hp not in hparams:
raise ValueError(f"Need hparams['{hp}']")
self.hparams = SimpleNamespace(**hparams)
# Prepare modules for computation, e.g. jit
self._prepare_modules(freeze_params)
# Audio normalization
self.audio_normalizer = hparams.get(
"audio_normalizer", AudioNormalizer()
)
def _prepare_modules(self, freeze_params):
"""Prepare modules for computation, e.g. jit.
Arguments
---------
freeze_params : bool
Whether to freeze the parameters and call ``eval()``.
"""
# Make jit-able
self._compile_jit()
self._wrap_distributed()
# If we don't want to backprop, freeze the pretrained parameters
if freeze_params:
self.mods.eval()
for p in self.mods.parameters():
p.requires_grad = False
def load_audio(self, path, savedir="."):
"""Load an audio file with this model's input spec
        When using a speech model, it is important to use the same type of data
        as was used to train the model. This means, for example, using the same
sampling rate and number of channels. It is, however, possible to
convert a file from a higher sampling rate to a lower one (downsampling).
Similarly, it is simple to downmix a stereo file to mono.
The path can be a local path, a web url, or a link to a huggingface repo.
"""
source, fl = split_path(path)
path = fetch(fl, source=source, savedir=savedir)
signal, sr = torchaudio.load(str(path), channels_first=False)
return self.audio_normalizer(signal, sr)
def _compile_jit(self):
"""Compile requested modules with ``torch.jit.script``."""
if self.jit_module_keys is None:
return
for name in self.jit_module_keys:
if name not in self.mods:
raise ValueError(
"module " + name + " cannot be jit compiled because "
"it is not defined in your hparams file."
)
module = torch.jit.script(self.mods[name])
self.mods[name] = module.to(self.device)
def _wrap_distributed(self):
"""Wrap modules with distributed wrapper when requested."""
if not self.distributed_launch and not self.data_parallel_backend:
return
elif self.distributed_launch:
for name, module in self.mods.items():
if any(p.requires_grad for p in module.parameters()):
# for ddp, all module must run on same GPU
module = SyncBatchNorm.convert_sync_batchnorm(module)
module = DDP(module, device_ids=[self.device])
self.mods[name] = module
else:
# data_parallel_backend
for name, module in self.mods.items():
if any(p.requires_grad for p in module.parameters()):
# if distributed_count = -1 then use all gpus
# otherwise, specify the set of gpu to use
if self.data_parallel_count == -1:
module = DP(module)
else:
module = DP(
module, [i for i in range(self.data_parallel_count)]
)
self.mods[name] = module
@classmethod
def from_hparams(
cls,
source,
hparams_file="hyperparams.yaml",
pymodule_file="custom.py",
overrides={},
savedir=None,
use_auth_token=False,
revision=None,
download_only=False,
**kwargs,
):
"""Fetch and load based from outside source based on HyperPyYAML file
The source can be a location on the filesystem or online/huggingface
You can use the pymodule_file to include any custom implementations
that are needed: if that file exists, then its location is added to
sys.path before Hyperparams YAML is loaded, so it can be referenced
in the YAML.
The hyperparams file should contain a "modules" key, which is a
dictionary of torch modules used for computation.
The hyperparams file should contain a "pretrainer" key, which is a
speechbrain.utils.parameter_transfer.Pretrainer
Arguments
---------
source : str
The location to use for finding the model. See
``speechbrain.pretrained.fetching.fetch`` for details.
hparams_file : str
The name of the hyperparameters file to use for constructing
the modules necessary for inference. Must contain two keys:
"modules" and "pretrainer", as described.
pymodule_file : str
A Python file can be fetched. This allows any custom
implementations to be included. The file's location is added to
sys.path before the hyperparams YAML file is loaded, so it can be
referenced in YAML.
This is optional, but has a default: "custom.py". If the default
file is not found, this is simply ignored, but if you give a
different filename, then this will raise in case the file is not
found.
overrides : dict
Any changes to make to the hparams file when it is loaded.
savedir : str or Path
Where to put the pretraining material. If not given, will use
./pretrained_models/<class-name>-hash(source).
use_auth_token : bool (default: False)
            If true, HuggingFace's auth_token will be used to load private models from the HuggingFace Hub,
default is False because the majority of models are public.
revision : str
The model revision corresponding to the HuggingFace Hub model revision.
This is particularly useful if you wish to pin your code to a particular
version of a model hosted at HuggingFace.
download_only : bool (default: False)
If true, class and instance creation is skipped.
"""
if savedir is None:
clsname = cls.__name__
savedir = f"./pretrained_models/{clsname}-{hashlib.md5(source.encode('UTF-8', errors='replace')).hexdigest()}"
hparams_local_path = fetch(
filename=hparams_file,
source=source,
savedir=savedir,
overwrite=False,
save_filename=None,
use_auth_token=use_auth_token,
revision=revision,
)
try:
pymodule_local_path = fetch(
filename=pymodule_file,
source=source,
savedir=savedir,
overwrite=False,
save_filename=None,
use_auth_token=use_auth_token,
revision=revision,
)
sys.path.append(str(pymodule_local_path.parent))
except ValueError:
if pymodule_file == "custom.py":
# The optional custom Python module file did not exist
# and had the default name
pass
else:
# Custom Python module file not found, but some other
# filename than the default was given.
raise
# Load the modules:
with open(hparams_local_path) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Pretraining:
pretrainer = hparams["pretrainer"]
pretrainer.set_collect_in(savedir)
# For distributed setups, have this here:
run_on_main(pretrainer.collect_files, kwargs={"default_source": source})
# Load on the CPU. Later the params can be moved elsewhere by specifying
if not download_only:
# run_opts={"device": ...}
pretrainer.load_collected(device="cpu")
# Now return the system
return cls(hparams["modules"], hparams, **kwargs)
class EndToEndSLU(Pretrained):
"""An end-to-end SLU model.
The class can be used either to run only the encoder (encode()) to extract
features or to run the entire model (decode()) to map the speech to its semantics.
Example
-------
>>> from speechbrain.pretrained import EndToEndSLU
>>> tmpdir = getfixture("tmpdir")
>>> slu_model = EndToEndSLU.from_hparams(
... source="speechbrain/slu-timers-and-such-direct-librispeech-asr",
... savedir=tmpdir,
... )
>>> slu_model.decode_file("tests/samples/single-mic/example6.wav")
"{'intent': 'SimpleMath', 'slots': {'number1': 37.67, 'number2': 75.7, 'op': ' minus '}}"
"""
HPARAMS_NEEDED = ["tokenizer", "asr_model_source"]
MODULES_NEEDED = ["slu_enc", "beam_searcher"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tokenizer = self.hparams.tokenizer
self.asr_model = EncoderDecoderASR.from_hparams(
source=self.hparams.asr_model_source,
run_opts={"device": self.device},
)
def decode_file(self, path):
"""Maps the given audio file to a string representing the
semantic dictionary for the utterance.
Arguments
---------
path : str
Path to audio file to decode.
Returns
-------
str
The predicted semantics.
"""
waveform = self.load_audio(path)
waveform = waveform.to(self.device)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
predicted_words, predicted_tokens = self.decode_batch(batch, rel_length)
return predicted_words[0]
def encode_batch(self, wavs, wav_lens):
"""Encodes the input audio into a sequence of hidden states
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.Tensor
The encoded batch
"""
wavs = wavs.float()
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
ASR_encoder_out = self.asr_model.encode_batch(wavs.detach(), wav_lens)
encoder_out = self.mods.slu_enc(ASR_encoder_out)
return encoder_out
def decode_batch(self, wavs, wav_lens):
"""Maps the input audio to its semantics
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
list
Each waveform in the batch decoded.
tensor
Each predicted token id.
"""
with torch.no_grad():
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
encoder_out = self.encode_batch(wavs, wav_lens)
predicted_tokens, scores = self.mods.beam_searcher(
encoder_out, wav_lens
)
predicted_words = [
self.tokenizer.decode_ids(token_seq)
for token_seq in predicted_tokens
]
return predicted_words, predicted_tokens
def forward(self, wavs, wav_lens):
"""Runs full decoding - note: no gradients through decoding"""
return self.decode_batch(wavs, wav_lens)
class EncoderDecoderASR(Pretrained):
"""A ready-to-use Encoder-Decoder ASR model
The class can be used either to run only the encoder (encode()) to extract
features or to run the entire encoder-decoder model
(transcribe()) to transcribe speech. The given YAML must contain the fields
specified in the *_NEEDED[] lists.
Example
-------
>>> from speechbrain.pretrained import EncoderDecoderASR
>>> tmpdir = getfixture("tmpdir")
>>> asr_model = EncoderDecoderASR.from_hparams(
... source="speechbrain/asr-crdnn-rnnlm-librispeech",
... savedir=tmpdir,
... )
>>> asr_model.transcribe_file("tests/samples/single-mic/example2.flac")
"MY FATHER HAS REVEALED THE CULPRIT'S NAME"
"""
HPARAMS_NEEDED = ["tokenizer"]
MODULES_NEEDED = ["encoder", "decoder"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tokenizer = self.hparams.tokenizer
def transcribe_file(self, path):
"""Transcribes the given audiofile into a sequence of words.
Arguments
---------
path : str
Path to audio file which to transcribe.
Returns
-------
str
The audiofile transcription produced by this ASR system.
"""
waveform = self.load_audio(path)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
predicted_words, predicted_tokens = self.transcribe_batch(
batch, rel_length
)
return predicted_words[0]
def encode_batch(self, wavs, wav_lens):
"""Encodes the input audio into a sequence of hidden states
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderDecoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.Tensor
The encoded batch
"""
wavs = wavs.float()
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
encoder_out = self.mods.encoder(wavs, wav_lens)
return encoder_out
def transcribe_batch(self, wavs, wav_lens):
"""Transcribes the input audio into a sequence of words
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderDecoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
list
Each waveform in the batch transcribed.
tensor
Each predicted token id.
"""
with torch.no_grad():
wav_lens = wav_lens.to(self.device)
encoder_out = self.encode_batch(wavs, wav_lens)
predicted_tokens, scores = self.mods.decoder(encoder_out, wav_lens)
predicted_words = [
self.tokenizer.decode_ids(token_seq)
for token_seq in predicted_tokens
]
return predicted_words, predicted_tokens
def forward(self, wavs, wav_lens):
"""Runs full transcription - note: no gradients through decoding"""
return self.transcribe_batch(wavs, wav_lens)
class WaveformEncoder(Pretrained):
"""A ready-to-use waveformEncoder model
It can be used to wrap different embedding models such as SSL ones (wav2vec2)
or speaker ones (Xvector) etc. Two functions are available: encode_batch and
encode_file. They can be used to obtain the embeddings directly from an audio
file or from a batch of audio tensors respectively.
The given YAML must contain the fields specified in the *_NEEDED[] lists.
Example
-------
>>> from speechbrain.pretrained import WaveformEncoder
>>> tmpdir = getfixture("tmpdir")
>>> ssl_model = WaveformEncoder.from_hparams(
... source="speechbrain/ssl-wav2vec2-base-libri",
... savedir=tmpdir,
... ) # doctest: +SKIP
>>> ssl_model.encode_file("samples/audio_samples/example_fr.wav") # doctest: +SKIP
"""
MODULES_NEEDED = ["encoder"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def encode_file(self, path):
"""Encode the given audiofile into a sequence of embeddings.
Arguments
---------
path : str
Path to audio file which to encode.
Returns
-------
torch.Tensor
The audiofile embeddings produced by this system.
"""
waveform = self.load_audio(path)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
results = self.encode_batch(batch, rel_length)
return results["embeddings"]
def encode_batch(self, wavs, wav_lens):
"""Encodes the input audio into a sequence of hidden states
The waveforms should already be in the model's desired format.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.Tensor
The encoded batch
"""
wavs = wavs.float()
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
encoder_out = self.mods.encoder(wavs, wav_lens)
return encoder_out
def forward(self, wavs, wav_lens):
"""Runs the encoder"""
return self.encode_batch(wavs, wav_lens)
class EncoderASR(Pretrained):
"""A ready-to-use Encoder ASR model
    The class can be used either to run only the encoder (encode_batch()) to
    extract features, or to run the full encoding + decoding pipeline
    (transcribe_batch()/transcribe_file()) to transcribe speech. The given YAML
    must contain the fields specified in the *_NEEDED[] lists.
Example
-------
>>> from speechbrain.pretrained import EncoderASR
>>> tmpdir = getfixture("tmpdir")
>>> asr_model = EncoderASR.from_hparams(
... source="speechbrain/asr-wav2vec2-commonvoice-fr",
... savedir=tmpdir,
... ) # doctest: +SKIP
>>> asr_model.transcribe_file("samples/audio_samples/example_fr.wav") # doctest: +SKIP
"""
HPARAMS_NEEDED = ["tokenizer", "decoding_function"]
MODULES_NEEDED = ["encoder"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tokenizer = self.hparams.tokenizer
self.decoding_function = self.hparams.decoding_function
def transcribe_file(self, path):
"""Transcribes the given audiofile into a sequence of words.
Arguments
---------
path : str
Path to audio file which to transcribe.
Returns
-------
str
The audiofile transcription produced by this ASR system.
"""
waveform = self.load_audio(path)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
predicted_words, predicted_tokens = self.transcribe_batch(
batch, rel_length
)
return str(predicted_words[0])
def encode_batch(self, wavs, wav_lens):
"""Encodes the input audio into a sequence of hidden states
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.Tensor
The encoded batch
"""
wavs = wavs.float()
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
encoder_out = self.mods.encoder(wavs, wav_lens)
return encoder_out
def transcribe_batch(self, wavs, wav_lens):
"""Transcribes the input audio into a sequence of words
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
list
Each waveform in the batch transcribed.
tensor
Each predicted token id.
"""
with torch.no_grad():
wav_lens = wav_lens.to(self.device)
encoder_out = self.encode_batch(wavs, wav_lens)
predictions = self.decoding_function(encoder_out, wav_lens)
if isinstance(
self.tokenizer, speechbrain.dataio.encoder.CTCTextEncoder
):
predicted_words = [
"".join(self.tokenizer.decode_ndim(token_seq))
for token_seq in predictions
]
elif isinstance(
self.tokenizer, sentencepiece.SentencePieceProcessor
):
predicted_words = [
self.tokenizer.decode_ids(token_seq)
for token_seq in predictions
]
else:
sys.exit(
"The tokenizer must be sentencepiece or CTCTextEncoder"
)
return predicted_words, predictions
def forward(self, wavs, wav_lens):
"""Runs the encoder"""
return self.encode_batch(wavs, wav_lens)
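# Editor's illustrative sketch (not part of the original library code): EncoderASR
# runs the encoder, applies the hparams `decoding_function` (e.g. CTC decoding),
# and then detokenizes with either a CTCTextEncoder or a SentencePiece tokenizer,
# as implemented in transcribe_batch above. The file path is hypothetical.
def _example_encoder_asr_usage():  # pragma: no cover
    asr_model = EncoderASR.from_hparams(
        source="speechbrain/asr-wav2vec2-commonvoice-fr",
        savedir="pretrained_models/encoder_asr",
    )
    # transcribe_file fakes a batch of size 1 with relative length 1.0.
    return asr_model.transcribe_file("samples/audio_samples/example_fr.wav")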
class EncoderClassifier(Pretrained):
"""A ready-to-use class for utterance-level classification (e.g, speaker-id,
language-id, emotion recognition, keyword spotting, etc).
The class assumes that an encoder called "embedding_model" and a model
called "classifier" are defined in the yaml file. If you want to
convert the predicted index into a corresponding text label, please
provide the path of the label_encoder in a variable called 'lab_encoder_file'
within the yaml.
The class can be used either to run only the encoder (encode_batch()) to
extract embeddings or to run a classification step (classify_batch()).
Example
-------
>>> import torchaudio
>>> from speechbrain.pretrained import EncoderClassifier
>>> # Model is downloaded from the speechbrain HuggingFace repo
>>> tmpdir = getfixture("tmpdir")
>>> classifier = EncoderClassifier.from_hparams(
... source="speechbrain/spkrec-ecapa-voxceleb",
... savedir=tmpdir,
... )
>>> # Compute embeddings
>>> signal, fs = torchaudio.load("tests/samples/single-mic/example1.wav")
>>> embeddings = classifier.encode_batch(signal)
>>> # Classification
>>> prediction = classifier.classify_batch(signal)
"""
MODULES_NEEDED = [
"compute_features",
"mean_var_norm",
"embedding_model",
"classifier",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def encode_batch(self, wavs, wav_lens=None, normalize=False):
"""Encodes the input audio into a single vector embedding.
The waveforms should already be in the model's desired format.
You can call:
``normalized = <this>.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model. Make sure the sample rate is fs=16000 Hz.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
normalize : bool
If True, it normalizes the embeddings with the statistics
contained in mean_var_norm_emb.
Returns
-------
torch.Tensor
The encoded batch
"""
# Manage single waveforms in input
if len(wavs.shape) == 1:
wavs = wavs.unsqueeze(0)
# Assign full length if wav_lens is not assigned
if wav_lens is None:
wav_lens = torch.ones(wavs.shape[0], device=self.device)
# Storing waveform in the specified device
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
wavs = wavs.float()
# Computing features and embeddings
feats = self.mods.compute_features(wavs)
feats = self.mods.mean_var_norm(feats, wav_lens)
embeddings = self.mods.embedding_model(feats, wav_lens)
if normalize:
embeddings = self.hparams.mean_var_norm_emb(
embeddings, torch.ones(embeddings.shape[0], device=self.device)
)
return embeddings
def classify_batch(self, wavs, wav_lens=None):
"""Performs classification on the top of the encoded features.
It returns the posterior probabilities, the index and, if the label
encoder is specified it also the text label.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model. Make sure the sample rate is fs=16000 Hz.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
out_prob
The log posterior probabilities of each class ([batch, N_class])
score:
It is the value of the log-posterior for the best class ([batch,])
index
The indexes of the best class ([batch,])
text_lab:
List with the text labels corresponding to the indexes.
(label encoder should be provided).
"""
emb = self.encode_batch(wavs, wav_lens)
out_prob = self.mods.classifier(emb).squeeze(1)
score, index = torch.max(out_prob, dim=-1)
text_lab = self.hparams.label_encoder.decode_torch(index)
return out_prob, score, index, text_lab
def classify_file(self, path):
"""Classifies the given audiofile into the given set of labels.
Arguments
---------
path : str
Path to audio file to classify.
Returns
-------
out_prob
The log posterior probabilities of each class ([batch, N_class])
score:
It is the value of the log-posterior for the best class ([batch,])
index
The indexes of the best class ([batch,])
text_lab:
List with the text labels corresponding to the indexes.
(label encoder should be provided).
"""
waveform = self.load_audio(path)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
emb = self.encode_batch(batch, rel_length)
out_prob = self.mods.classifier(emb).squeeze(1)
score, index = torch.max(out_prob, dim=-1)
text_lab = self.hparams.label_encoder.decode_torch(index)
return out_prob, score, index, text_lab
def forward(self, wavs, wav_lens=None):
"""Runs the classification"""
return self.classify_batch(wavs, wav_lens)
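# Editor's illustrative sketch (not part of the original library code): batched
# classification with explicit relative lengths, following the conventions of
# classify_batch above. The checkpoint mirrors the class docstring example.
def _example_encoder_classifier_usage():  # pragma: no cover
    import torch

    classifier = EncoderClassifier.from_hparams(
        source="speechbrain/spkrec-ecapa-voxceleb",
        savedir="pretrained_models/classifier",
    )
    wavs = torch.randn(2, 16000)          # padded batch at 16 kHz
    wav_lens = torch.tensor([1.0, 0.7])   # second row is 70% real signal
    out_prob, score, index, text_lab = classifier.classify_batch(wavs, wav_lens)
    return text_lab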
class SpeakerRecognition(EncoderClassifier):
"""A ready-to-use model for speaker recognition. It can be used to
perform speaker verification with verify_batch().
Example
-------
>>> import torchaudio
>>> from speechbrain.pretrained import SpeakerRecognition
>>> # Model is downloaded from the speechbrain HuggingFace repo
>>> tmpdir = getfixture("tmpdir")
>>> verification = SpeakerRecognition.from_hparams(
... source="speechbrain/spkrec-ecapa-voxceleb",
... savedir=tmpdir,
... )
>>> # Perform verification
>>> signal, fs = torchaudio.load("tests/samples/single-mic/example1.wav")
>>> signal2, fs = torchaudio.load("tests/samples/single-mic/example2.flac")
>>> score, prediction = verification.verify_batch(signal, signal2)
"""
MODULES_NEEDED = [
"compute_features",
"mean_var_norm",
"embedding_model",
"mean_var_norm_emb",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
def verify_batch(
self, wavs1, wavs2, wav1_lens=None, wav2_lens=None, threshold=0.25
):
"""Performs speaker verification with cosine distance.
It returns the score and the decision (0 different speakers,
1 same speakers).
Arguments
---------
wavs1 : Torch.Tensor
Tensor containing the speech waveform1 (batch, time).
Make sure the sample rate is fs=16000 Hz.
wavs2 : Torch.Tensor
Tensor containing the speech waveform2 (batch, time).
Make sure the sample rate is fs=16000 Hz.
        wav1_lens : torch.Tensor
            Tensor containing the relative length for each sentence
            in the batch (e.g., [0.8 0.6 1.0]).
        wav2_lens : torch.Tensor
            Tensor containing the relative length for each sentence
            in the batch (e.g., [0.8 0.6 1.0]).
threshold: Float
Threshold applied to the cosine distance to decide if the
speaker is different (0) or the same (1).
Returns
-------
score
The score associated to the binary verification output
(cosine distance).
prediction
The prediction is 1 if the two signals in input are from the same
speaker and 0 otherwise.
"""
emb1 = self.encode_batch(wavs1, wav1_lens, normalize=True)
emb2 = self.encode_batch(wavs2, wav2_lens, normalize=True)
score = self.similarity(emb1, emb2)
return score, score > threshold
def verify_files(self, path_x, path_y):
"""Speaker verification with cosine distance
Returns the score and the decision (0 different speakers,
1 same speakers).
Returns
-------
score
The score associated to the binary verification output
(cosine distance).
prediction
The prediction is 1 if the two signals in input are from the same
speaker and 0 otherwise.
"""
waveform_x = self.load_audio(path_x)
waveform_y = self.load_audio(path_y)
# Fake batches:
batch_x = waveform_x.unsqueeze(0)
batch_y = waveform_y.unsqueeze(0)
# Verify:
score, decision = self.verify_batch(batch_x, batch_y)
# Squeeze:
return score[0], decision[0]
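# Editor's illustrative sketch (not part of the original library code): speaker
# verification between two files. verify_files embeds both signals, scores them
# with cosine similarity, and thresholds the score (default threshold 0.25), as
# implemented in verify_batch above. The file paths are hypothetical.
def _example_speaker_verification_usage():  # pragma: no cover
    verification = SpeakerRecognition.from_hparams(
        source="speechbrain/spkrec-ecapa-voxceleb",
        savedir="pretrained_models/verification",
    )
    score, same_speaker = verification.verify_files(
        "tests/samples/single-mic/example1.wav",
        "tests/samples/single-mic/example2.flac",
    )
    return float(score), bool(same_speaker)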
class VAD(Pretrained):
"""A ready-to-use class for Voice Activity Detection (VAD) using a
pre-trained model.
Example
-------
>>> import torchaudio
>>> from speechbrain.pretrained import VAD
>>> # Model is downloaded from the speechbrain HuggingFace repo
>>> tmpdir = getfixture("tmpdir")
>>> VAD = VAD.from_hparams(
... source="speechbrain/vad-crdnn-libriparty",
... savedir=tmpdir,
... )
>>> # Perform VAD
>>> boundaries = VAD.get_speech_segments("tests/samples/single-mic/example1.wav")
"""
HPARAMS_NEEDED = ["sample_rate", "time_resolution", "device"]
MODULES_NEEDED = ["compute_features", "mean_var_norm", "model"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.time_resolution = self.hparams.time_resolution
self.sample_rate = self.hparams.sample_rate
self.device = self.hparams.device
def get_speech_prob_file(
self,
audio_file,
large_chunk_size=30,
small_chunk_size=10,
overlap_small_chunk=False,
):
"""Outputs the frame-level speech probability of the input audio file
using the neural model specified in the hparam file. To make this code
both parallelizable and scalable to long sequences, it uses a
double-windowing approach. First, we sequentially read non-overlapping
large chunks of the input signal. We then split the large chunks into
smaller chunks and we process them in parallel.
Arguments
---------
audio_file: path
Path of the audio file containing the recording. The file is read
with torchaudio.
large_chunk_size: float
Size (in seconds) of the large chunks that are read sequentially
from the input audio file.
        small_chunk_size: float
            Size (in seconds) of the small chunks extracted from the large ones.
The audio signal is processed in parallel within the small chunks.
Note that large_chunk_size/small_chunk_size must be an integer.
overlap_small_chunk: bool
True, creates overlapped small chunks. The probabilities of the
overlapped chunks are combined using hamming windows.
Returns
-------
prob_vad: torch.Tensor
Tensor containing the frame-level speech probabilities for the
input audio file.
"""
# Getting the total size of the input file
sample_rate, audio_len = self._get_audio_info(audio_file)
if sample_rate != self.sample_rate:
raise ValueError(
"The detected sample rate is different from that set in the hparam file"
)
# Computing the length (in samples) of the large and small chunks
long_chunk_len = int(sample_rate * large_chunk_size)
small_chunk_len = int(sample_rate * small_chunk_size)
# Setting the step size of the small chunk (50% overlapping windows are supported)
small_chunk_step = small_chunk_size
if overlap_small_chunk:
small_chunk_step = small_chunk_size / 2
# Computing the length (in sample) of the small_chunk step size
small_chunk_len_step = int(sample_rate * small_chunk_step)
# Loop over big chunks
prob_chunks = []
last_chunk = False
begin_sample = 0
while True:
# Reading the big chunk
large_chunk, fs = torchaudio.load(
audio_file, frame_offset=begin_sample, num_frames=long_chunk_len
)
large_chunk = large_chunk.to(self.device)
# Manage padding of the last small chunk
if last_chunk or large_chunk.shape[-1] < small_chunk_len:
padding = torch.zeros(
1, small_chunk_len, device=large_chunk.device
)
large_chunk = torch.cat([large_chunk, padding], dim=1)
# Splitting the big chunk into smaller (overlapped) ones
small_chunks = torch.nn.functional.unfold(
large_chunk.unsqueeze(1).unsqueeze(2),
kernel_size=(1, small_chunk_len),
stride=(1, small_chunk_len_step),
)
small_chunks = small_chunks.squeeze(0).transpose(0, 1)
# Getting (in parallel) the frame-level speech probabilities
small_chunks_prob = self.get_speech_prob_chunk(small_chunks)
small_chunks_prob = small_chunks_prob[:, :-1, :]
# Manage overlapping chunks
if overlap_small_chunk:
small_chunks_prob = self._manage_overlapped_chunks(
small_chunks_prob
)
# Prepare for folding
small_chunks_prob = small_chunks_prob.permute(2, 1, 0)
# Computing lengths in samples
out_len = int(
large_chunk.shape[-1] / (sample_rate * self.time_resolution)
)
kernel_len = int(small_chunk_size / self.time_resolution)
step_len = int(small_chunk_step / self.time_resolution)
# Folding the frame-level predictions
small_chunks_prob = torch.nn.functional.fold(
small_chunks_prob,
output_size=(1, out_len),
kernel_size=(1, kernel_len),
stride=(1, step_len),
)
# Appending the frame-level speech probabilities of the large chunk
small_chunks_prob = small_chunks_prob.squeeze(1).transpose(-1, -2)
prob_chunks.append(small_chunks_prob)
# Check stop condition
if last_chunk:
break
# Update counter to process the next big chunk
begin_sample = begin_sample + long_chunk_len
# Check if the current chunk is the last one
if begin_sample + long_chunk_len > audio_len:
last_chunk = True
# Converting the list to a tensor
prob_vad = torch.cat(prob_chunks, dim=1)
last_elem = int(audio_len / (self.time_resolution * sample_rate))
prob_vad = prob_vad[:, 0:last_elem, :]
return prob_vad
def _manage_overlapped_chunks(self, small_chunks_prob):
"""This support function manages overlapped the case in which the
small chunks have a 50% overlap."""
# Weighting the frame-level probabilities with a hamming window
# reduces uncertainty when overlapping chunks are used.
hamming_window = torch.hamming_window(
small_chunks_prob.shape[1], device=self.device
)
# First and last chunks require special care
half_point = int(small_chunks_prob.shape[1] / 2)
small_chunks_prob[0, half_point:] = small_chunks_prob[
0, half_point:
] * hamming_window[half_point:].unsqueeze(1)
small_chunks_prob[-1, 0:half_point] = small_chunks_prob[
-1, 0:half_point
] * hamming_window[0:half_point].unsqueeze(1)
# Applying the window to all the other probabilities
small_chunks_prob[1:-1] = small_chunks_prob[
1:-1
] * hamming_window.unsqueeze(0).unsqueeze(2)
return small_chunks_prob
def get_speech_prob_chunk(self, wavs, wav_lens=None):
"""Outputs the frame-level posterior probability for the input audio chunks
Outputs close to zero refers to time steps with a low probability of speech
activity, while outputs closer to one likely contain speech.
Arguments
---------
wavs : torch.Tensor
Batch of waveforms [batch, time, channels] or [batch, time]
depending on the model. Make sure the sample rate is fs=16000 Hz.
wav_lens : torch.Tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.Tensor
The encoded batch
"""
# Manage single waveforms in input
if len(wavs.shape) == 1:
wavs = wavs.unsqueeze(0)
# Assign full length if wav_lens is not assigned
if wav_lens is None:
wav_lens = torch.ones(wavs.shape[0], device=self.device)
# Storing waveform in the specified device
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
wavs = wavs.float()
# Computing features and embeddings
feats = self.mods.compute_features(wavs)
feats = self.mods.mean_var_norm(feats, wav_lens)
outputs = self.mods.cnn(feats)
outputs = outputs.reshape(
outputs.shape[0],
outputs.shape[1],
outputs.shape[2] * outputs.shape[3],
)
outputs, h = self.mods.rnn(outputs)
outputs = self.mods.dnn(outputs)
output_prob = torch.sigmoid(outputs)
return output_prob
def apply_threshold(
self, vad_prob, activation_th=0.5, deactivation_th=0.25
):
"""Scans the frame-level speech probabilities and applies a threshold
on them. Speech starts when a value larger than activation_th is
detected, while it ends when observing a value lower than
the deactivation_th.
Arguments
---------
vad_prob: torch.Tensor
Frame-level speech probabilities.
activation_th: float
Threshold for starting a speech segment.
deactivation_th: float
Threshold for ending a speech segment.
Returns
-------
vad_th: torch.Tensor
Tensor containing 1 for speech regions and 0 for non-speech regions.
"""
vad_activation = (vad_prob >= activation_th).int()
vad_deactivation = (vad_prob >= deactivation_th).int()
vad_th = vad_activation + vad_deactivation
# Loop over batches and time steps
for batch in range(vad_th.shape[0]):
for time_step in range(vad_th.shape[1] - 1):
if (
vad_th[batch, time_step] == 2
and vad_th[batch, time_step + 1] == 1
):
vad_th[batch, time_step + 1] = 2
vad_th[vad_th == 1] = 0
vad_th[vad_th == 2] = 1
return vad_th
def get_boundaries(self, prob_th, output_value="seconds"):
"""Computes the time boundaries where speech activity is detected.
It takes in input frame-level binary decisions
(1 for speech, 0 for non-speech) and outputs the begin/end second
(or sample) of each detected speech region.
Arguments
---------
prob_th: torch.Tensor
Frame-level binary decisions (1 for speech frame, 0 for a
non-speech one). The tensor can be obtained from apply_threshold.
output_value: 'seconds' or 'samples'
When the option 'seconds' is set, the returned boundaries are in
seconds, otherwise, it reports them in samples.
Returns
-------
        boundaries: torch.Tensor
            Tensor containing the start second (or sample) of speech segments
            in even positions and their corresponding end in odd positions
            (e.g., [1.0, 1.5, 5.0, 6.0] means that we have two speech segments;
            one from 1.0 to 1.5 seconds and another from 5.0 to 6.0 seconds).
"""
# Shifting frame-levels binary decision by 1
# This allows detecting changes in speech/non-speech activities
prob_th_shifted = torch.roll(prob_th, dims=1, shifts=1)
prob_th_shifted[:, 0, :] = 0
prob_th = prob_th + prob_th_shifted
        # Needed for the first and last time steps
prob_th[:, 0, :] = (prob_th[:, 0, :] >= 1).int()
prob_th[:, -1, :] = (prob_th[:, -1, :] >= 1).int()
# Fix edge cases (when a speech starts in the last frames)
if (prob_th == 1).nonzero().shape[0] % 2 == 1:
prob_th = torch.cat(
(prob_th, torch.Tensor([1.0]).unsqueeze(0).unsqueeze(2)), dim=1
)
# Where prob_th is 1 there is a change
indexes = (prob_th == 1).nonzero()[:, 1].reshape(-1, 2)
# Remove 1 from end samples
indexes[:, -1] = indexes[:, -1] - 1
# From indexes to samples
seconds = (indexes * self.time_resolution).float()
samples = (self.sample_rate * seconds).round().int()
if output_value == "seconds":
boundaries = seconds
else:
boundaries = samples
return boundaries
def merge_close_segments(self, boundaries, close_th=0.250):
"""Merges segments that are shorter than the given threshold.
Arguments
---------
        boundaries : torch.Tensor
            Tensor containing the speech boundaries. It can be derived using the
            get_boundaries method.
close_th: float
If the distance between boundaries is smaller than close_th, the
segments will be merged.
Returns
-------
new_boundaries
The new boundaries with the merged segments.
"""
new_boundaries = []
# Single segment case
if boundaries.shape[0] == 0:
return boundaries
# Getting beg and end of previous segment
prev_beg_seg = boundaries[0, 0].float()
prev_end_seg = boundaries[0, 1].float()
# Process all the segments
for i in range(1, boundaries.shape[0]):
beg_seg = boundaries[i, 0]
segment_distance = beg_seg - prev_end_seg
# Merging close segments
if segment_distance <= close_th:
prev_end_seg = boundaries[i, 1]
else:
# Appending new segments
new_boundaries.append([prev_beg_seg, prev_end_seg])
prev_beg_seg = beg_seg
prev_end_seg = boundaries[i, 1]
new_boundaries.append([prev_beg_seg, prev_end_seg])
new_boundaries = torch.FloatTensor(new_boundaries).to(boundaries.device)
return new_boundaries
def remove_short_segments(self, boundaries, len_th=0.250):
"""Removes segments that are too short.
Arguments
---------
boundaries : torch.Tensor
Tensor containing the speech boundaries. It can be derived using the
get_boundaries method.
        len_th: float
            If the length of the segment is smaller than len_th, the segment
            will be removed.
Returns
-------
new_boundaries
The new boundaries without the short segments.
"""
new_boundaries = []
# Process the segments
for i in range(boundaries.shape[0]):
# Computing segment length
seg_len = boundaries[i, 1] - boundaries[i, 0]
# Accept segment only if longer than len_th
if seg_len > len_th:
new_boundaries.append([boundaries[i, 0], boundaries[i, 1]])
new_boundaries = torch.FloatTensor(new_boundaries).to(boundaries.device)
return new_boundaries
def save_boundaries(
self, boundaries, save_path=None, print_boundaries=True, audio_file=None
):
"""Saves the boundaries on a file (and/or prints them) in a readable format.
Arguments
---------
boundaries: torch.Tensor
Tensor containing the speech boundaries. It can be derived using the
get_boundaries method.
        save_path: path
            Where to store the text file containing the speech/non-speech intervals.
print_boundaries: Bool
Prints the speech/non-speech intervals in the standard outputs.
audio_file: path
Path of the audio file containing the recording. The file is read
with torchaudio. It is used here to detect the length of the
signal.
"""
# Create a new file if needed
if save_path is not None:
f = open(save_path, mode="w", encoding="utf-8")
# Getting the total size of the input file
if audio_file is not None:
sample_rate, audio_len = self._get_audio_info(audio_file)
audio_len = audio_len / sample_rate
        # Setting the right format for second- or sample-based boundaries
if boundaries.dtype == torch.int:
value_format = "% i"
else:
value_format = "% .2f "
# Printing speech and non-speech intervals
last_end = 0
cnt_seg = 0
for i in range(boundaries.shape[0]):
begin_value = boundaries[i, 0]
end_value = boundaries[i, 1]
if last_end != begin_value:
cnt_seg = cnt_seg + 1
print_str = (
"segment_%03d " + value_format + value_format + "NON_SPEECH"
)
if print_boundaries:
print(print_str % (cnt_seg, last_end, begin_value))
if save_path is not None:
f.write(print_str % (cnt_seg, last_end, begin_value) + "\n")
cnt_seg = cnt_seg + 1
print_str = "segment_%03d " + value_format + value_format + "SPEECH"
if print_boundaries:
print(print_str % (cnt_seg, begin_value, end_value))
if save_path is not None:
f.write(print_str % (cnt_seg, begin_value, end_value) + "\n")
last_end = end_value
# Managing last segment
if audio_file is not None:
if last_end < audio_len:
cnt_seg = cnt_seg + 1
print_str = (
"segment_%03d " + value_format + value_format + "NON_SPEECH"
)
if print_boundaries:
print(print_str % (cnt_seg, end_value, audio_len))
if save_path is not None:
f.write(print_str % (cnt_seg, end_value, audio_len) + "\n")
if save_path is not None:
f.close()
def energy_VAD(
self,
audio_file,
boundaries,
activation_th=0.5,
deactivation_th=0.0,
eps=1e-6,
):
"""Applies energy-based VAD within the detected speech segments.The neural
network VAD often creates longer segments and tends to merge segments that
are close with each other.
The energy VAD post-processes can be useful for having a fine-grained voice
activity detection.
The energy VAD computes the energy within the small chunks. The energy is
normalized within the segment to have mean 0.5 and +-0.5 of std.
This helps to set the energy threshold.
Arguments
---------
audio_file: path
Path of the audio file containing the recording. The file is read
with torchaudio.
boundaries : torch.Tensor
Tensor containing the speech boundaries. It can be derived using the
get_boundaries method.
        activation_th: float
            A new speech segment is started if the energy is above activation_th.
        deactivation_th: float
            The segment is considered ended when the energy is <= deactivation_th.
eps: float
Small constant for numerical stability.
Returns
-------
new_boundaries
The new boundaries that are post-processed by the energy VAD.
"""
# Getting the total size of the input file
sample_rate, audio_len = self._get_audio_info(audio_file)
if sample_rate != self.sample_rate:
raise ValueError(
"The detected sample rate is different from that set in the hparam file"
)
# Computing the chunk length of the energy window
chunk_len = int(self.time_resolution * sample_rate)
new_boundaries = []
# Processing speech segments
for i in range(boundaries.shape[0]):
begin_sample = int(boundaries[i, 0] * sample_rate)
end_sample = int(boundaries[i, 1] * sample_rate)
seg_len = end_sample - begin_sample
# Reading the speech segment
segment, _ = torchaudio.load(
audio_file, frame_offset=begin_sample, num_frames=seg_len
)
# Create chunks
segment_chunks = self.create_chunks(
segment, chunk_size=chunk_len, chunk_stride=chunk_len
)
# Energy computation within each chunk
energy_chunks = segment_chunks.abs().sum(-1) + eps
energy_chunks = energy_chunks.log()
# Energy normalization
energy_chunks = (
(energy_chunks - energy_chunks.mean())
/ (2 * energy_chunks.std())
) + 0.5
energy_chunks = energy_chunks.unsqueeze(0).unsqueeze(2)
# Apply threshold based on the energy value
energy_vad = self.apply_threshold(
energy_chunks,
activation_th=activation_th,
deactivation_th=deactivation_th,
)
# Get the boundaries
energy_boundaries = self.get_boundaries(
energy_vad, output_value="seconds"
)
# Get the final boundaries in the original signal
for j in range(energy_boundaries.shape[0]):
start_en = boundaries[i, 0] + energy_boundaries[j, 0]
end_end = boundaries[i, 0] + energy_boundaries[j, 1]
new_boundaries.append([start_en, end_end])
# Convert boundaries to tensor
new_boundaries = torch.FloatTensor(new_boundaries).to(boundaries.device)
return new_boundaries
def create_chunks(self, x, chunk_size=16384, chunk_stride=16384):
"""Splits the input into smaller chunks of size chunk_size with
an overlap chunk_stride. The chunks are concatenated over
the batch axis.
Arguments
---------
x: torch.Tensor
Signal to split into chunks.
chunk_size : str
The size of each chunk.
chunk_stride:
The stride (hop) of each chunk.
Returns
-------
x: torch.Tensor
A new tensors with the chunks derived from the input signal.
"""
x = x.unfold(1, chunk_size, chunk_stride)
x = x.reshape(x.shape[0] * x.shape[1], -1)
return x
def _get_audio_info(self, audio_file):
"""Returns the sample rate and the length of the input audio file"""
# Getting the total size of the input file
metadata = torchaudio.info(audio_file)
sample_rate = metadata.sample_rate
audio_len = metadata.num_frames
return sample_rate, audio_len
def upsample_VAD(self, vad_out, audio_file, time_resolution=0.01):
"""Upsamples the output of the vad to help visualization. It creates a
signal that is 1 when there is speech and 0 when there is no speech.
The vad signal has the same resolution as the input one and can be
opened with it (e.g, using audacity) to visually figure out VAD regions.
Arguments
---------
vad_out: torch.Tensor
Tensor containing 1 for each frame of speech and 0 for each non-speech
frame.
audio_file: path
The original audio file used to compute vad_out
time_resolution : float
Time resolution of the vad_out signal.
Returns
-------
vad_signal
The upsampled version of the vad_out tensor.
"""
# Getting the total size of the input file
sample_rate, sig_len = self._get_audio_info(audio_file)
if sample_rate != self.sample_rate:
raise ValueError(
"The detected sample rate is different from that set in the hparam file"
)
beg_samp = 0
step_size = int(time_resolution * sample_rate)
end_samp = step_size
index = 0
# Initialize upsampled signal
vad_signal = torch.zeros(1, sig_len, device=vad_out.device)
# Upsample signal
while end_samp < sig_len:
vad_signal[0, beg_samp:end_samp] = vad_out[0, index, 0]
index = index + 1
beg_samp = beg_samp + step_size
end_samp = beg_samp + step_size
return vad_signal
def upsample_boundaries(self, boundaries, audio_file):
"""Based on the input boundaries, this method creates a signal that is 1
when there is speech and 0 when there is no speech.
The vad signal has the same resolution as the input one and can be
opened with it (e.g, using audacity) to visually figure out VAD regions.
Arguments
---------
boundaries: torch.Tensor
Tensor containing the boundaries of the speech segments.
audio_file: path
The original audio file used to compute vad_out
Returns
-------
vad_signal
The output vad signal with the same resolution of the input one.
"""
# Getting the total size of the input file
sample_rate, sig_len = self._get_audio_info(audio_file)
if sample_rate != self.sample_rate:
raise ValueError(
"The detected sample rate is different from that set in the hparam file"
)
# Initialization of the output signal
vad_signal = torch.zeros(1, sig_len, device=boundaries.device)
# Composing the vad signal from boundaries
for i in range(boundaries.shape[0]):
beg_sample = int(boundaries[i, 0] * sample_rate)
end_sample = int(boundaries[i, 1] * sample_rate)
vad_signal[0, beg_sample:end_sample] = 1.0
return vad_signal
def double_check_speech_segments(
self, boundaries, audio_file, speech_th=0.5
):
"""Takes in input the boundaries of the detected speech segments and
double checks (using the neural VAD) that they actually contain speech.
Arguments
---------
boundaries: torch.Tensor
Tensor containing the boundaries of the speech segments.
audio_file: path
The original audio file used to compute vad_out.
speech_th: float
Threshold on the mean posterior probability over which speech is
confirmed. Below that threshold, the segment is re-assigned to a
non-speech region.
Returns
-------
new_boundaries
The boundaries of the segments where speech activity is confirmed.
"""
# Getting the total size of the input file
sample_rate, sig_len = self._get_audio_info(audio_file)
# Double check the segments
new_boundaries = []
for i in range(boundaries.shape[0]):
beg_sample = int(boundaries[i, 0] * sample_rate)
end_sample = int(boundaries[i, 1] * sample_rate)
len_seg = end_sample - beg_sample
# Read the candidate speech segment
segment, fs = torchaudio.load(
audio_file, frame_offset=beg_sample, num_frames=len_seg
)
speech_prob = self.get_speech_prob_chunk(segment)
if speech_prob.mean() > speech_th:
# Accept this as a speech segment
new_boundaries.append([boundaries[i, 0], boundaries[i, 1]])
# Convert boundaries from list to tensor
new_boundaries = torch.FloatTensor(new_boundaries).to(boundaries.device)
return new_boundaries
def get_segments(
self, boundaries, audio_file, before_margin=0.1, after_margin=0.1
):
"""Returns a list containing all the detected speech segments.
Arguments
---------
boundaries: torch.Tensor
Tensor containing the boundaries of the speech segments.
audio_file: path
The original audio file used to compute vad_out.
before_margin: float
Used to cut the segments samples a bit before the detected margin.
after_margin: float
Use to cut the segments samples a bit after the detected margin.
Returns
-------
segments: list
List containing the detected speech segments
"""
sample_rate, sig_len = self._get_audio_info(audio_file)
if sample_rate != self.sample_rate:
raise ValueError(
"The detected sample rate is different from that set in the hparam file"
)
segments = []
for i in range(boundaries.shape[0]):
beg_sample = boundaries[i, 0] * sample_rate
end_sample = boundaries[i, 1] * sample_rate
beg_sample = int(max(0, beg_sample - before_margin * sample_rate))
end_sample = int(
min(sig_len, end_sample + after_margin * sample_rate)
)
len_seg = end_sample - beg_sample
vad_segment, fs = torchaudio.load(
audio_file, frame_offset=beg_sample, num_frames=len_seg
)
segments.append(vad_segment)
return segments
def get_speech_segments(
self,
audio_file,
large_chunk_size=30,
small_chunk_size=10,
overlap_small_chunk=False,
apply_energy_VAD=False,
double_check=True,
close_th=0.250,
len_th=0.250,
activation_th=0.5,
deactivation_th=0.25,
en_activation_th=0.5,
en_deactivation_th=0.0,
speech_th=0.50,
):
"""Detects speech segments within the input file. The input signal can
be both a short or a long recording. The function computes the
posterior probabilities on large chunks (e.g, 30 sec), that are read
sequentially (to avoid storing big signals in memory).
Each large chunk is, in turn, split into smaller chunks (e.g, 10 seconds)
that are processed in parallel. The pipeline for detecting the speech
segments is the following:
        1- Compute posterior probabilities at the frame level.
2- Apply a threshold on the posterior probability.
3- Derive candidate speech segments on top of that.
4- Apply energy VAD within each candidate segment (optional).
5- Merge segments that are too close.
6- Remove segments that are too short.
7- Double check speech segments (optional).
Arguments
---------
audio_file : str
Path to audio file.
large_chunk_size: float
Size (in seconds) of the large chunks that are read sequentially
from the input audio file.
small_chunk_size: float
Size (in seconds) of the small chunks extracted from the large ones.
The audio signal is processed in parallel within the small chunks.
Note that large_chunk_size/small_chunk_size must be an integer.
overlap_small_chunk: bool
If True, it creates overlapped small chunks (with 50% overlap).
The probabilities of the overlapped chunks are combined using
hamming windows.
        apply_energy_VAD: bool
            If True, an energy-based VAD is used on the detected speech segments.
            The neural network VAD often creates longer segments and tends to
            merge close segments together. The energy VAD post-processing can be
            useful to obtain a more fine-grained voice activity detection.
            The energy thresholds are managed by en_activation_th and
            en_deactivation_th (see below).
double_check: bool
If True, double checks (using the neural VAD) that the candidate
speech segments actually contain speech. A threshold on the mean
posterior probabilities provided by the neural network is applied
based on the speech_th parameter (see below).
        activation_th: float
            Threshold of the neural posteriors above which a speech segment is
            started.
        deactivation_th: float
            Threshold of the neural posteriors below which a speech segment is
            ended.
        en_activation_th: float
            A new speech segment is started if the energy is above
            en_activation_th. This is active only if apply_energy_VAD is True.
        en_deactivation_th: float
            The segment is considered ended when the energy is <=
            en_deactivation_th. This is active only if apply_energy_VAD is True.
speech_th: float
Threshold on the mean posterior probability within the candidate
speech segment. Below that threshold, the segment is re-assigned to
a non-speech region. This is active only if double_check is True.
close_th: float
If the distance between boundaries is smaller than close_th, the
segments will be merged.
        len_th: float
            If the length of the segment is smaller than len_th, the segment
            will be removed.
Returns
-------
boundaries: torch.Tensor
Tensor containing the start second of speech segments in even
positions and their corresponding end in odd positions
            (e.g., [1.0, 1.5, 5.0, 6.0] means that we have two speech segments;
            one from 1.0 to 1.5 seconds and another from 5.0 to 6.0 seconds).
"""
# Fetch audio file from web if not local
source, fl = split_path(audio_file)
audio_file = fetch(fl, source=source)
# Computing speech vs non speech probabilities
prob_chunks = self.get_speech_prob_file(
audio_file,
large_chunk_size=large_chunk_size,
small_chunk_size=small_chunk_size,
overlap_small_chunk=overlap_small_chunk,
)
# Apply a threshold to get candidate speech segments
prob_th = self.apply_threshold(
prob_chunks,
activation_th=activation_th,
deactivation_th=deactivation_th,
).float()
# Compute the boundaries of the speech segments
boundaries = self.get_boundaries(prob_th, output_value="seconds")
# Apply energy-based VAD on the detected speech segments
if apply_energy_VAD:
boundaries = self.energy_VAD(
audio_file,
boundaries,
activation_th=en_activation_th,
deactivation_th=en_deactivation_th,
)
# Merge short segments
boundaries = self.merge_close_segments(boundaries, close_th=close_th)
# Remove short segments
boundaries = self.remove_short_segments(boundaries, len_th=len_th)
# Double check speech segments
if double_check:
boundaries = self.double_check_speech_segments(
boundaries, audio_file, speech_th=speech_th
)
return boundaries
def forward(self, wavs, wav_lens=None):
"""Gets frame-level speech-activity predictions"""
return self.get_speech_prob_chunk(wavs, wav_lens)
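# Editor's illustrative sketch (not part of the original library code): the
# step-by-step pipeline that get_speech_segments runs internally, spelled out
# with the individual VAD methods defined above. The audio path is hypothetical.
def _example_vad_pipeline_usage():  # pragma: no cover
    vad = VAD.from_hparams(
        source="speechbrain/vad-crdnn-libriparty",
        savedir="pretrained_models/vad",
    )
    audio_file = "tests/samples/single-mic/example1.wav"
    # 1-2: frame-level speech probabilities, then thresholding
    prob_chunks = vad.get_speech_prob_file(audio_file)
    prob_th = vad.apply_threshold(
        prob_chunks, activation_th=0.5, deactivation_th=0.25
    ).float()
    # 3: candidate speech boundaries (in seconds)
    boundaries = vad.get_boundaries(prob_th, output_value="seconds")
    # 5-6: merge close segments and drop very short ones
    boundaries = vad.merge_close_segments(boundaries, close_th=0.25)
    boundaries = vad.remove_short_segments(boundaries, len_th=0.25)
    # 7: optional double check with the neural VAD
    boundaries = vad.double_check_speech_segments(
        boundaries, audio_file, speech_th=0.5
    )
    vad.save_boundaries(boundaries, audio_file=audio_file)
    return boundaries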
class SepformerSeparation(Pretrained):
"""A "ready-to-use" speech separation model.
Uses Sepformer architecture.
Example
-------
>>> tmpdir = getfixture("tmpdir")
>>> model = SepformerSeparation.from_hparams(
... source="speechbrain/sepformer-wsj02mix",
... savedir=tmpdir)
>>> mix = torch.randn(1, 400)
>>> est_sources = model.separate_batch(mix)
>>> print(est_sources.shape)
torch.Size([1, 400, 2])
"""
MODULES_NEEDED = ["encoder", "masknet", "decoder"]
def separate_batch(self, mix):
"""Run source separation on batch of audio.
Arguments
---------
mix : torch.Tensor
The mixture of sources.
Returns
-------
tensor
Separated sources
"""
# Separation
mix = mix.to(self.device)
mix_w = self.mods.encoder(mix)
est_mask = self.mods.masknet(mix_w)
mix_w = torch.stack([mix_w] * self.hparams.num_spks)
sep_h = mix_w * est_mask
# Decoding
est_source = torch.cat(
[
self.mods.decoder(sep_h[i]).unsqueeze(-1)
for i in range(self.hparams.num_spks)
],
dim=-1,
)
# T changed after conv1d in encoder, fix it here
T_origin = mix.size(1)
T_est = est_source.size(1)
if T_origin > T_est:
est_source = F.pad(est_source, (0, 0, 0, T_origin - T_est))
else:
est_source = est_source[:, :T_origin, :]
return est_source
def separate_file(self, path, savedir="."):
"""Separate sources from file.
Arguments
---------
path : str
Path to file which has a mixture of sources. It can be a local
path, a web url, or a huggingface repo.
savedir : path
Path where to store the wav signals (when downloaded from the web).
Returns
-------
tensor
Separated sources
"""
source, fl = split_path(path)
path = fetch(fl, source=source, savedir=savedir)
batch, fs_file = torchaudio.load(path)
batch = batch.to(self.device)
fs_model = self.hparams.sample_rate
# resample the data if needed
if fs_file != fs_model:
print(
"Resampling the audio from {} Hz to {} Hz".format(
fs_file, fs_model
)
)
tf = torchaudio.transforms.Resample(
orig_freq=fs_file, new_freq=fs_model
).to(self.device)
batch = batch.mean(dim=0, keepdim=True)
batch = tf(batch)
est_sources = self.separate_batch(batch)
est_sources = (
est_sources / est_sources.abs().max(dim=1, keepdim=True)[0]
)
return est_sources
def forward(self, mix):
"""Runs separation on the input mix"""
return self.separate_batch(mix)
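# Editor's illustrative sketch (not part of the original library code): separating
# a mixture file and saving each estimated source. separate_file resamples the
# mixture to the model's sample rate if needed and peak-normalizes the estimates,
# as shown above. The input path is hypothetical.
def _example_sepformer_usage():  # pragma: no cover
    import torchaudio

    model = SepformerSeparation.from_hparams(
        source="speechbrain/sepformer-wsj02mix",
        savedir="pretrained_models/sepformer",
    )
    est_sources = model.separate_file("samples/audio_samples/test_mixture.wav")
    # est_sources has shape [batch, time, num_spks]
    for spk in range(est_sources.shape[-1]):
        torchaudio.save(
            f"source{spk + 1}.wav",
            est_sources[:, :, spk].detach().cpu(),
            model.hparams.sample_rate,
        )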
class SpectralMaskEnhancement(Pretrained):
"""A ready-to-use model for speech enhancement.
Arguments
---------
See ``Pretrained``.
Example
-------
>>> import torch
>>> from speechbrain.pretrained import SpectralMaskEnhancement
>>> # Model is downloaded from the speechbrain HuggingFace repo
>>> tmpdir = getfixture("tmpdir")
>>> enhancer = SpectralMaskEnhancement.from_hparams(
... source="speechbrain/metricgan-plus-voicebank",
... savedir=tmpdir,
... )
>>> enhanced = enhancer.enhance_file(
... "speechbrain/metricgan-plus-voicebank/example.wav"
... )
"""
HPARAMS_NEEDED = ["compute_stft", "spectral_magnitude", "resynth"]
MODULES_NEEDED = ["enhance_model"]
def compute_features(self, wavs):
"""Compute the log spectral magnitude features for masking.
Arguments
---------
wavs : torch.Tensor
A batch of waveforms to convert to log spectral mags.
"""
feats = self.hparams.compute_stft(wavs)
feats = self.hparams.spectral_magnitude(feats)
return torch.log1p(feats)
def enhance_batch(self, noisy, lengths=None):
"""Enhance a batch of noisy waveforms.
Arguments
---------
noisy : torch.Tensor
A batch of waveforms to perform enhancement on.
lengths : torch.Tensor
The lengths of the waveforms if the enhancement model handles them.
Returns
-------
torch.Tensor
A batch of enhanced waveforms of the same shape as input.
"""
noisy = noisy.to(self.device)
noisy_features = self.compute_features(noisy)
# Perform masking-based enhancement, multiplying output with input.
if lengths is not None:
mask = self.mods.enhance_model(noisy_features, lengths=lengths)
else:
mask = self.mods.enhance_model(noisy_features)
enhanced = torch.mul(mask, noisy_features)
# Return resynthesized waveforms
return self.hparams.resynth(torch.expm1(enhanced), noisy)
def enhance_file(self, filename, output_filename=None):
"""Enhance a wav file.
Arguments
---------
filename : str
Location on disk to load file for enhancement.
output_filename : str
If provided, writes enhanced data to this file.
"""
noisy = self.load_audio(filename)
noisy = noisy.to(self.device)
# Fake a batch:
batch = noisy.unsqueeze(0)
if lengths_arg_exists(self.enhance_batch):
enhanced = self.enhance_batch(batch, lengths=torch.tensor([1.0]))
else:
enhanced = self.enhance_batch(batch)
if output_filename is not None:
torchaudio.save(output_filename, enhanced, channels_first=False)
return enhanced.squeeze(0)
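# Editor's illustrative sketch (not part of the original library code): enhancing
# a padded batch directly with enhance_batch. Whether `lengths` is accepted
# depends on the underlying enhance_model, which is why enhance_file above checks
# for the argument; here it is passed explicitly under that assumption.
def _example_spectral_enhancement_usage():  # pragma: no cover
    import torch

    enhancer = SpectralMaskEnhancement.from_hparams(
        source="speechbrain/metricgan-plus-voicebank",
        savedir="pretrained_models/enhancer",
    )
    noisy = torch.randn(2, 16000)        # padded batch of noisy waveforms
    lengths = torch.tensor([1.0, 0.8])   # relative lengths of each row
    return enhancer.enhance_batch(noisy, lengths=lengths)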
class EncodeDecodePipelineMixin:
"""
A mixin for pretrained models that makes it possible to specify an encoding pipeline and a decoding pipeline
"""
def create_pipelines(self):
"""
Initializes the encode and decode pipeline
"""
self._run_init_steps(self.hparams.encode_pipeline)
self._run_init_steps(self.hparams.decode_pipeline)
self.encode_pipeline = DataPipeline(
static_data_keys=self.INPUT_STATIC_KEYS,
dynamic_items=self.hparams.encode_pipeline["steps"],
output_keys=self.hparams.encode_pipeline["output_keys"],
)
self.decode_pipeline = DataPipeline(
static_data_keys=self.hparams.model_output_keys,
dynamic_items=self.hparams.decode_pipeline["steps"],
output_keys=self.OUTPUT_KEYS,
)
def _run_init_steps(self, pipeline_definition):
"""Encode/decode pipelines may include initialization
steps, such as filling text encoders with tokens. Calling
this method will run them, if defined"""
steps = pipeline_definition.get("init", [])
for step in steps:
step_func = step.get("func")
if not step_func or not callable(step_func):
raise ValueError("Invalid pipeline init definition")
step_func()
def _run_pipeline(self, pipeline, input, batch):
if batch:
output = pipeline(input)
else:
output = [pipeline(item) for item in input]
return output
def _get_encode_pipeline_input(self, input):
return input if self.batch_inputs else self._itemize(input)
def _get_decode_pipeline_input(self, model_output):
model_output_keys = getattr(self.hparams, "model_output_keys", None)
pipeline_input = model_output
        if model_output_keys and len(model_output_keys) == 1:
            pipeline_input = (pipeline_input,)
# The input to a pipeline is a dictionary. If model_output_keys
# is provided, the output of the model is assumed to be a collection
# (e.g. a list or a tuple).
if model_output_keys:
pipeline_input = dict(zip(model_output_keys, pipeline_input))
# By default, the pipeline will be applied to in batch mode
# to the entire model input
if not self.batch_outputs:
pipeline_input = self._itemize(pipeline_input)
return pipeline_input
def _itemize(self, pipeline_input):
first_item = next(iter(pipeline_input.values()))
keys, values = pipeline_input.keys(), pipeline_input.values()
batch_length = len(first_item)
return [
dict(zip(keys, [value[idx] for value in values]))
for idx in range(batch_length)
]
def to_dict(self, data):
"""
Converts padded batches to dictionaries, leaves
other data types as is
Arguments
---------
data: object
a dictionary or a padded batch
Returns
-------
results: dict
the dictionary
"""
if isinstance(data, PaddedBatch):
data = {
key: self._get_value(data, key)
for key in self.hparams.encode_pipeline["output_keys"]
}
return data
def _get_value(self, data, key):
"""
Retrieves the value associated with the specified key, dereferencing
.data where applicable
Arguments
---------
data: PaddedBatch
a padded batch
key: str
the key
Returns
-------
result: object
the result
"""
value = getattr(data, key)
if not self.input_use_padded_data and isinstance(value, PaddedData):
value = value.data
return value
@property
def batch_inputs(self):
"""
Determines whether the input pipeline
operates on batches or individual examples
(true means batched)
Returns
-------
batch_inputs: bool
"""
return self.hparams.encode_pipeline.get("batch", True)
@property
def input_use_padded_data(self):
"""
If turned on, raw PaddedData instances will be passed to
the model. If turned off, only .data will be used
Returns
-------
result: bool
whether padded data is used as is
"""
return self.hparams.encode_pipeline.get("use_padded_data", False)
@property
def batch_outputs(self):
"""
Determines whether the output pipeline
operates on batches or individual examples
(true means batched)
Returns
-------
batch_outputs: bool
"""
return self.hparams.decode_pipeline.get("batch", True)
def _collate(self, data):
if not self.batch_inputs:
collate_fn = getattr(self.hparams, "collate_fn", PaddedBatch)
data = collate_fn(data)
return data
def encode_input(self, input):
"""
Encodes the inputs using the pipeline
Arguments
---------
input: dict
the raw inputs
Returns
-------
results: object
"""
pipeline_input = self._get_encode_pipeline_input(input)
model_input = self._run_pipeline(
pipeline=self.encode_pipeline,
input=pipeline_input,
batch=self.batch_inputs,
)
model_input = self._collate(model_input)
if hasattr(model_input, "to"):
model_input = model_input.to(self.device)
return self.to_dict(model_input)
def decode_output(self, output):
"""
Decodes the raw model outputs
Arguments
---------
output: tuple
raw model outputs
Returns
-------
result: dict or list
the output of the pipeline
"""
pipeline_input = self._get_decode_pipeline_input(output)
return self._run_pipeline(
pipeline=self.decode_pipeline,
input=pipeline_input,
batch=self.batch_outputs,
)
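# Editor's illustrative sketch (not part of the original library code): a minimal,
# hypothetical shape for the hparams entries the mixin reads above. It relies only
# on the keys accessed in create_pipelines/_run_init_steps ("steps", "output_keys",
# and optionally "init", "batch", "use_padded_data"); the dynamic item is made up.
def _example_encode_pipeline_definition():  # pragma: no cover
    from speechbrain.utils.data_pipeline import takes, provides

    @takes("txt")
    @provides("txt_cleaned")
    def clean_text(txt):
        # Hypothetical preprocessing step
        return txt.lower().strip()

    return {
        "steps": [clean_text],           # dynamic items run by the DataPipeline
        "output_keys": ["txt_cleaned"],  # what encode_input should return
        "batch": False,                  # run per item, then collate to a batch
    }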
class GraphemeToPhoneme(Pretrained, EncodeDecodePipelineMixin):
"""
    A pretrained model implementation for Grapheme-to-Phoneme (G2P) models
    that take raw natural language text as an input and return the
    corresponding phoneme sequence.
Example
-------
>>> text = ("English is tough. It can be understood "
... "through thorough thought though")
>>> from speechbrain.pretrained import GraphemeToPhoneme
>>> tmpdir = getfixture('tmpdir')
>>> g2p = GraphemeToPhoneme.from_hparams('path/to/model', savedir=tmpdir) # doctest: +SKIP
>>> phonemes = g2p.g2p(text) # doctest: +SKIP
"""
INPUT_STATIC_KEYS = ["txt"]
OUTPUT_KEYS = ["phonemes"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.create_pipelines()
self.load_dependencies()
@property
def phonemes(self):
"""Returns the available phonemes"""
return self.hparams.phonemes
@property
def language(self):
"""Returns the language for which this model is available"""
return self.hparams.language
def g2p(self, text):
"""Performs the Grapheme-to-Phoneme conversion
Arguments
---------
text: str or list[str]
a single string to be encoded to phonemes - or a
sequence of strings
Returns
-------
result: list
if a single example was provided, the return value is a
single list of phonemes
"""
single = isinstance(text, str)
if single:
text = [text]
model_inputs = self.encode_input({"txt": text})
self._update_graphemes(model_inputs)
model_outputs = self.mods.model(**model_inputs)
decoded_output = self.decode_output(model_outputs)
phonemes = decoded_output["phonemes"]
if single:
phonemes = phonemes[0]
return phonemes
def _update_graphemes(self, model_inputs):
        grapheme_sequence_mode = getattr(
            self.hparams, "grapheme_sequence_mode", None
        )
if grapheme_sequence_mode and grapheme_sequence_mode != "raw":
grapheme_encoded_key = f"grapheme_encoded_{grapheme_sequence_mode}"
if grapheme_encoded_key in model_inputs:
model_inputs["grapheme_encoded"] = model_inputs[
grapheme_encoded_key
]
def load_dependencies(self):
"""Loads any relevant model dependencies"""
deps_pretrainer = getattr(self.hparams, "deps_pretrainer", None)
if deps_pretrainer:
deps_pretrainer.collect_files()
deps_pretrainer.load_collected(device=self.device)
def __call__(self, text):
"""A convenience callable wrapper - same as G2P
Arguments
---------
text: str or list[str]
a single string to be encoded to phonemes - or a
sequence of strings
Returns
-------
result: list
if a single example was provided, the return value is a
single list of phonemes
"""
return self.g2p(text)
    def forward(self, text):
        """Runs the G2P conversion on the input text"""
        return self.g2p(text)
class WaveformEnhancement(Pretrained):
"""A ready-to-use model for speech enhancement.
Arguments
---------
See ``Pretrained``.
Example
-------
>>> from speechbrain.pretrained import WaveformEnhancement
>>> # Model is downloaded from the speechbrain HuggingFace repo
>>> tmpdir = getfixture("tmpdir")
>>> enhancer = WaveformEnhancement.from_hparams(
... source="speechbrain/mtl-mimic-voicebank",
... savedir=tmpdir,
... )
>>> enhanced = enhancer.enhance_file(
... "speechbrain/mtl-mimic-voicebank/example.wav"
... )
"""
MODULES_NEEDED = ["enhance_model"]
def enhance_batch(self, noisy, lengths=None):
"""Enhance a batch of noisy waveforms.
Arguments
---------
noisy : torch.Tensor
A batch of waveforms to perform enhancement on.
lengths : torch.Tensor
The lengths of the waveforms if the enhancement model handles them.
Returns
-------
torch.Tensor
A batch of enhanced waveforms of the same shape as input.
"""
noisy = noisy.to(self.device)
enhanced_wav, _ = self.mods.enhance_model(noisy)
return enhanced_wav
def enhance_file(self, filename, output_filename=None):
"""Enhance a wav file.
Arguments
---------
filename : str
Location on disk to load file for enhancement.
output_filename : str
If provided, writes enhanced data to this file.
"""
noisy = self.load_audio(filename)
# Fake a batch:
batch = noisy.unsqueeze(0)
enhanced = self.enhance_batch(batch)
if output_filename is not None:
torchaudio.save(output_filename, enhanced, channels_first=False)
return enhanced.squeeze(0)
def forward(self, noisy, lengths=None):
"""Runs enhancement on the noisy input"""
return self.enhance_batch(noisy, lengths)
class SNREstimator(Pretrained):
"""A "ready-to-use" SNR estimator.
"""
MODULES_NEEDED = ["encoder", "encoder_out"]
HPARAMS_NEEDED = ["stat_pooling", "snrmax", "snrmin"]
def estimate_batch(self, mix, predictions):
"""Run SI-SNR estimation on the estimated sources, and mixture.
Arguments
---------
        mix : torch.Tensor
            The mixture of sources of shape B x T.
        predictions : torch.Tensor
            The estimated sources of shape B x T x C, where B is the batch size,
            T is the number of time points, and C is the number of sources.
Returns
-------
tensor
Estimate of SNR
"""
predictions = predictions.permute(0, 2, 1)
predictions = predictions.reshape(-1, predictions.size(-1))
if hasattr(self.hparams, "separation_norm_type"):
if self.hparams.separation_norm_type == "max":
predictions = (
predictions / predictions.max(dim=1, keepdim=True)[0]
)
mix = mix / mix.max(dim=1, keepdim=True)[0]
elif self.hparams.separation_norm_type == "stnorm":
predictions = (
predictions - predictions.mean(dim=1, keepdim=True)
) / predictions.std(dim=1, keepdim=True)
mix = (mix - mix.mean(dim=1, keepdim=True)) / mix.std(
dim=1, keepdim=True
)
min_T = min(predictions.shape[1], mix.shape[1])
assert predictions.shape[1] == mix.shape[1], "lengths change"
mix_repeat = mix.repeat(2, 1)
inp_cat = torch.cat(
[
predictions[:, :min_T].unsqueeze(1),
mix_repeat[:, :min_T].unsqueeze(1),
],
dim=1,
)
enc = self.mods.encoder(inp_cat)
enc = enc.permute(0, 2, 1)
enc_stats = self.hparams.stat_pooling(enc)
# this gets the SI-SNR estimate in the compressed range 0-1
snrhat = self.mods.encoder_out(enc_stats).squeeze()
# get the SI-SNR estimate in the true range
snrhat = self.gettrue_snrrange(snrhat)
return snrhat
def forward(self, mix, predictions):
"""Just run the batch estimate"""
return self.estimate_batch(mix, predictions)
def gettrue_snrrange(self, inp):
"""Convert from 0-1 range to true snr range"""
rnge = self.hparams.snrmax - self.hparams.snrmin
inp = inp * rnge
inp = inp + self.hparams.snrmin
return inp
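# Editor's illustrative sketch (not part of the original library code): estimating
# SI-SNR for separated sources. Shapes follow the estimate_batch docstring above:
# the mixture is [B, T] and the predictions are [B, T, C]. The checkpoint name is
# an assumed example of an SNR estimator hparams source.
def _example_snr_estimator_usage():  # pragma: no cover
    import torch

    snr_est = SNREstimator.from_hparams(
        source="speechbrain/REAL-M-sisnr-estimator",  # assumed checkpoint
        savedir="pretrained_models/snr_estimator",
    )
    mix = torch.randn(1, 16000)              # mixture [B, T]
    predictions = torch.randn(1, 16000, 2)   # separated sources [B, T, C]
    return snr_est.estimate_batch(mix, predictions)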
class Tacotron2(Pretrained):
"""
A ready-to-use wrapper for Tacotron2 (text -> mel_spec).
Arguments
---------
hparams
Hyperparameters (from HyperPyYAML)
Example
-------
>>> tmpdir_vocoder = getfixture('tmpdir') / "vocoder"
>>> tacotron2 = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir=tmpdir_vocoder)
>>> mel_output, mel_length, alignment = tacotron2.encode_text("Mary had a little lamb")
>>> items = [
... "A quick brown fox jumped over the lazy dog",
... "How much wood would a woodchuck chuck?",
... "Never odd or even"
... ]
>>> mel_outputs, mel_lengths, alignments = tacotron2.encode_batch(items)
>>> # One can combine the TTS model with a vocoder (that generates the final waveform)
>>> # Initialize the Vocoder (HiFIGAN)
>>> tmpdir_tts = getfixture('tmpdir') / "tts"
>>> hifi_gan = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir=tmpdir_tts)
>>> # Running the TTS
>>> mel_output, mel_length, alignment = tacotron2.encode_text("Mary had a little lamb")
>>> # Running Vocoder (spectrogram-to-waveform)
>>> waveforms = hifi_gan.decode_batch(mel_output)
"""
HPARAMS_NEEDED = ["model", "text_to_sequence"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_cleaners = getattr(
self.hparams, "text_cleaners", ["english_cleaners"]
)
self.infer = self.hparams.model.infer
def text_to_seq(self, txt):
"""Encodes raw text into a tensor with a customer text-to-sequence function
"""
sequence = self.hparams.text_to_sequence(txt, self.text_cleaners)
return sequence, len(sequence)
def encode_batch(self, texts):
"""Computes mel-spectrogram for a list of texts
Texts must be sorted in decreasing order on their lengths
Arguments
---------
texts: List[str]
texts to be encoded into spectrogram
Returns
-------
tensors of output spectrograms, output lengths and alignments
"""
with torch.no_grad():
inputs = [
{
"text_sequences": torch.tensor(
self.text_to_seq(item)[0], device=self.device
)
}
for item in texts
]
inputs = speechbrain.dataio.batch.PaddedBatch(inputs)
lens = [self.text_to_seq(item)[1] for item in texts]
assert lens == sorted(
lens, reverse=True
), "input lengths must be sorted in decreasing order"
input_lengths = torch.tensor(lens, device=self.device)
mel_outputs_postnet, mel_lengths, alignments = self.infer(
inputs.text_sequences.data, input_lengths
)
return mel_outputs_postnet, mel_lengths, alignments
def encode_text(self, text):
"""Runs inference for a single text str"""
return self.encode_batch([text])
def forward(self, texts):
"Encodes the input texts."
return self.encode_batch(texts)
class HIFIGAN(Pretrained):
"""
A ready-to-use wrapper for HiFiGAN (mel_spec -> waveform).
Arguments
---------
hparams
Hyperparameters (from HyperPyYAML)
Example
-------
>>> tmpdir_vocoder = getfixture('tmpdir') / "vocoder"
>>> hifi_gan = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir=tmpdir_vocoder)
    >>> mel_specs = torch.rand(2, 80, 298)
>>> waveforms = hifi_gan.decode_batch(mel_specs)
>>> # You can use the vocoder coupled with a TTS system
>>> # Initialize TTS (tacotron2)
>>> tmpdir_tts = getfixture('tmpdir') / "tts"
>>> tacotron2 = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir=tmpdir_tts)
>>> # Running the TTS
>>> mel_output, mel_length, alignment = tacotron2.encode_text("Mary had a little lamb")
>>> # Running Vocoder (spectrogram-to-waveform)
>>> waveforms = hifi_gan.decode_batch(mel_output)
"""
HPARAMS_NEEDED = ["generator"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.infer = self.hparams.generator.inference
self.first_call = True
def decode_batch(self, spectrogram):
"""Computes waveforms from a batch of mel-spectrograms
Arguments
---------
spectrogram: torch.Tensor
Batch of mel-spectrograms [batch, mels, time]
Returns
-------
waveforms: torch.Tensor
            Batch of waveforms [batch, 1, time]
"""
# Prepare for inference by removing the weight norm
if self.first_call:
self.hparams.generator.remove_weight_norm()
self.first_call = False
with torch.no_grad():
waveform = self.infer(spectrogram.to(self.device))
return waveform
def decode_spectrogram(self, spectrogram):
"""Computes waveforms from a single mel-spectrogram
Arguments
---------
spectrogram: torch.Tensor
mel-spectrogram [mels, time]
Returns
-------
waveform: torch.Tensor
waveform [1, time]
audio can be saved by:
>>> waveform = torch.rand(1, 666666)
>>> sample_rate = 22050
>>> torchaudio.save(str(getfixture('tmpdir') / "test.wav"), waveform, sample_rate)
"""
if self.first_call:
self.hparams.generator.remove_weight_norm()
self.first_call = False
with torch.no_grad():
waveform = self.infer(spectrogram.unsqueeze(0).to(self.device))
return waveform.squeeze(0)
def forward(self, spectrogram):
"Decodes the input spectrograms"
return self.decode_batch(spectrogram)
class WhisperASR(Pretrained):
"""A ready-to-use Whisper ASR model
The class can be used to run the entire encoder-decoder whisper model
    (transcribe()) to transcribe speech. The given YAML must contain the fields
specified in the *_NEEDED[] lists.
# Example
# -------
# >>> from speechbrain.pretrained.interfaces import foreign_class
# >>> tmpdir = getfixture("tmpdir")
# >>> asr_model = WhisperASR.from_hparams(source="speechbrain/asr-whisper-large-v2-commonvoice-fr", savedir=tmpdir,)
# >>> asr_model.transcribe_file("tests/samples/example2.wav")
# """
HPARAMS_NEEDED = ["language"]
MODULES_NEEDED = ["whisper", "decoder"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tokenizer = self.hparams.whisper.tokenizer
self.tokenizer.set_prefix_tokens(
self.hparams.language, "transcribe", False
)
self.hparams.decoder.set_decoder_input_tokens(
self.tokenizer.prefix_tokens
)
def transcribe_file(self, path):
"""Transcribes the given audiofile into a sequence of words.
Arguments
---------
path : str
Path to audio file which to transcribe.
Returns
-------
str
The audiofile transcription produced by this ASR system.
"""
waveform = self.load_audio(path)
# Fake a batch:
batch = waveform.unsqueeze(0)
rel_length = torch.tensor([1.0])
predicted_words, predicted_tokens = self.transcribe_batch(
batch, rel_length
)
return predicted_words
def encode_batch(self, wavs, wav_lens):
"""Encodes the input audio into a sequence of hidden states
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderDecoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.tensor
Batch of waveforms [batch, time, channels].
wav_lens : torch.tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
torch.tensor
The encoded batch
"""
wavs = wavs.float()
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
encoder_out = self.mods.whisper.forward_encoder(wavs)
return encoder_out
def transcribe_batch(self, wavs, wav_lens):
"""Transcribes the input audio into a sequence of words
The waveforms should already be in the model's desired format.
You can call:
``normalized = EncoderDecoderASR.normalizer(signal, sample_rate)``
to get a correctly converted signal in most cases.
Arguments
---------
wavs : torch.tensor
Batch of waveforms [batch, time, channels].
wav_lens : torch.tensor
Lengths of the waveforms relative to the longest one in the
batch, tensor of shape [batch]. The longest one should have
relative length 1.0 and others len(waveform) / max_length.
Used for ignoring padding.
Returns
-------
list
Each waveform in the batch transcribed.
tensor
Each predicted token id.
"""
with torch.no_grad():
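            # Encode the audio with the Whisper encoder, run the decoder
            # search over the encoder states, then map token ids back to text.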
wav_lens = wav_lens.to(self.device)
encoder_out = self.encode_batch(wavs, wav_lens)
predicted_tokens, scores = self.mods.decoder(encoder_out, wav_lens)
predicted_words = self.tokenizer.batch_decode(
predicted_tokens, skip_special_tokens=True
)
if self.hparams.normalized_transcripts:
predicted_words = [
self.tokenizer._normalize(text).split(" ")
for text in predicted_words
]
return predicted_words, predicted_tokens
def forward(self, wavs, wav_lens):
"""Runs full transcription - note: no gradients through decoding"""
return self.transcribe_batch(wavs, wav_lens)
| 107,365 | 34.812542 | 122 | py |
speechbrain | speechbrain-main/speechbrain/dataio/legacy.py | """SpeechBrain Extended CSV Compatibility."""
from speechbrain.dataio.dataset import DynamicItemDataset
import collections
import csv
import pickle
import logging
import torch
import torchaudio
import re
logger = logging.getLogger(__name__)
TORCHAUDIO_FORMATS = ["wav", "flac", "aac", "ogg", "mp3"]
ITEM_POSTFIX = "_data"
CSVItem = collections.namedtuple("CSVItem", ["data", "format", "opts"])
CSVItem.__doc__ = """The Legacy Extended CSV Data item triplet"""
class ExtendedCSVDataset(DynamicItemDataset):
"""Extended CSV compatibility for DynamicItemDataset.
    Uses the SpeechBrain Extended CSV data format, where the CSV must have
    'ID' and 'duration' fields.
The rest of the fields come in triplets:
``<name>, <name>_format, <name>_opts``
These add a <name>_sb_data item in the dict. Additionally, a basic
DynamicItem (see DynamicItemDataset) is created, which loads the _sb_data
item.
Bash-like string replacements with $to_replace are supported.
NOTE
----
Mapping from legacy interface:
- csv_file -> csvpath
- sentence_sorting -> sorting, and "random" is not supported, use e.g.
``make_dataloader(..., shuffle = (sorting=="random"))``
- avoid_if_shorter_than -> min_duration
- avoid_if_longer_than -> max_duration
- csv_read -> output_keys, and if you want IDs add "id" as key
Arguments
---------
csvpath : str, path
Path to extended CSV.
replacements : dict
Used for Bash-like $-prefixed substitution,
e.g. ``{"data_folder": "/home/speechbrain/data"}``, which would
        transform `$data_folder/utt1.wav` into `/home/speechbrain/data/utt1.wav`
sorting : {"original", "ascending", "descending"}
Keep CSV order, or sort ascending or descending by duration.
min_duration : float, int
Minimum duration in seconds. Discards other entries.
max_duration : float, int
Maximum duration in seconds. Discards other entries.
dynamic_items : list
Configuration for extra dynamic items produced when fetching an
example. List of DynamicItems or dicts with keys::
func: <callable> # To be called
takes: <list> # key or list of keys of args this takes
provides: key # key or list of keys that this provides
NOTE: A dynamic item is automatically added for each CSV data-triplet
output_keys : list, None
The list of output keys to produce. You can refer to the names of the
CSV data-triplets. E.G. if the CSV has: wav,wav_format,wav_opts,
then the Dataset has a dynamic item output available with key ``"wav"``
NOTE: If None, read all existing.
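    Example
    -------
    An extended CSV file could look like this (illustrative content only)::
        ID, duration, wav, wav_format, wav_opts
        utt1, 1.45, $data_folder/utt1.wav, wav,
        utt2, 2.0, $data_folder/utt2.wav, wav,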
"""
def __init__(
self,
csvpath,
replacements={},
sorting="original",
min_duration=0,
max_duration=36000,
dynamic_items=[],
output_keys=[],
):
if sorting not in ["original", "ascending", "descending"]:
clsname = self.__class__.__name__
raise ValueError(f"{clsname} doesn't support {sorting} sorting")
# Load the CSV, init class
data, di_to_add, data_names = load_sb_extended_csv(
csvpath, replacements
)
super().__init__(data, dynamic_items, output_keys)
self.pipeline.add_dynamic_items(di_to_add)
# Handle filtering, sorting:
reverse = False
sort_key = None
if sorting == "ascending" or "descending":
sort_key = "duration"
if sorting == "descending":
reverse = True
filtered_sorted_ids = self._filtered_sorted_ids(
key_min_value={"duration": min_duration},
key_max_value={"duration": max_duration},
sort_key=sort_key,
reverse=reverse,
)
self.data_ids = filtered_sorted_ids
# Handle None output_keys (differently than Base)
if not output_keys:
self.set_output_keys(data_names)
def load_sb_extended_csv(csv_path, replacements={}):
"""Loads SB Extended CSV and formats string values.
Uses the SpeechBrain Extended CSV data format, where the
    CSV must have 'ID' and 'duration' fields.
The rest of the fields come in triplets:
``<name>, <name>_format, <name>_opts``.
These add a <name>_sb_data item in the dict. Additionally, a
basic DynamicItem (see DynamicItemDataset) is created, which
loads the _sb_data item.
Bash-like string replacements with $to_replace are supported.
    This format has its restrictions, but they allow some tasks to
have loading specified by the CSV.
Arguments
----------
csv_path : str
Path to the CSV file.
replacements : dict
Optional dict:
e.g. ``{"data_folder": "/home/speechbrain/data"}``
This is used to recursively format all string values in the data.
Returns
-------
dict
CSV data with replacements applied.
    list
        List of DynamicItems to add in DynamicItemDataset.
    list
        List of names (keys) of the CSV data triplets.
"""
with open(csv_path, newline="") as csvfile:
result = {}
reader = csv.DictReader(csvfile, skipinitialspace=True)
variable_finder = re.compile(r"\$([\w.]+)")
if not reader.fieldnames[0] == "ID":
raise KeyError(
"CSV has to have an 'ID' field, with unique ids"
" for all data points"
)
if not reader.fieldnames[1] == "duration":
raise KeyError(
"CSV has to have an 'duration' field, "
"with the length of the data point in seconds."
)
if not len(reader.fieldnames[2:]) % 3 == 0:
raise ValueError(
"All named fields must have 3 entries: "
"<name>, <name>_format, <name>_opts"
)
names = reader.fieldnames[2::3]
for row in reader:
# Make a triplet for each name
data_point = {}
# ID:
data_id = row["ID"]
del row["ID"] # This is used as a key in result, instead.
# Duration:
data_point["duration"] = float(row["duration"])
del row["duration"] # This is handled specially.
if data_id in result:
raise ValueError(f"Duplicate id: {data_id}")
# Replacements:
# Only need to run these in the actual data,
# not in _opts, _format
for key, value in list(row.items())[::3]:
try:
row[key] = variable_finder.sub(
lambda match: replacements[match[1]], value
)
except KeyError:
raise KeyError(
f"The item {value} requires replacements "
"which were not supplied."
)
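            # Each remaining name owns three consecutive columns
            # (<name>, <name>_format, <name>_opts), grouped into a CSVItem.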
for i, name in enumerate(names):
triplet = CSVItem(*list(row.values())[i * 3 : i * 3 + 3])
data_point[name + ITEM_POSTFIX] = triplet
result[data_id] = data_point
# Make a DynamicItem for each CSV entry
# _read_csv_item delegates reading to further
dynamic_items_to_add = []
for name in names:
di = {
"func": _read_csv_item,
"takes": name + ITEM_POSTFIX,
"provides": name,
}
dynamic_items_to_add.append(di)
return result, dynamic_items_to_add, names
def _read_csv_item(item):
"""Reads the different formats supported in SB Extended CSV.
Delegates to the relevant functions.
"""
opts = _parse_csv_item_opts(item.opts)
if item.format in TORCHAUDIO_FORMATS:
audio, _ = torchaudio.load(item.data)
return audio.squeeze(0)
elif item.format == "pkl":
return read_pkl(item.data, opts)
elif item.format == "string":
# Just implement string reading here.
# NOTE: No longer supporting
# lab2ind mapping like before.
# Try decoding string
string = item.data
try:
string = string.decode("utf-8")
except AttributeError:
pass
# Splitting elements with ' '
string = string.split(" ")
return string
else:
raise TypeError(f"Don't know how to read {item.format}")
def _parse_csv_item_opts(entry):
"""Parse the _opts field in a SB Extended CSV item."""
# Accepting even slightly weirdly formatted entries:
entry = entry.strip()
if len(entry) == 0:
return {}
opts = {}
for opt in entry.split(" "):
opt_name, opt_val = opt.split(":")
opts[opt_name] = opt_val
return opts
def read_pkl(file, data_options={}, lab2ind=None):
"""This function reads tensors store in pkl format.
Arguments
---------
file : str
The path to file to read.
data_options : dict, optional
A dictionary containing options for the reader.
lab2ind : dict, optional
Mapping from label to integer indices.
Returns
-------
    numpy.array or torch.Tensor
        The array or tensor containing the read signal.
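    Example
    -------
    >>> import pickle
    >>> tmpfile = str(getfixture('tmpdir') / "example.pkl")
    >>> with open(tmpfile, "wb") as fo:
    ...     pickle.dump([1.0, 2.0, 3.0], fo)
    >>> read_pkl(tmpfile)
    tensor([1., 2., 3.])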
"""
# Trying to read data
try:
with open(file, "rb") as f:
pkl_element = pickle.load(f)
except pickle.UnpicklingError:
err_msg = "cannot read the pkl file %s" % (file)
raise ValueError(err_msg)
type_ok = False
if isinstance(pkl_element, list):
if isinstance(pkl_element[0], float):
tensor = torch.FloatTensor(pkl_element)
type_ok = True
if isinstance(pkl_element[0], int):
tensor = torch.LongTensor(pkl_element)
type_ok = True
if isinstance(pkl_element[0], str):
# convert string to integer as specified in self.label_dict
if lab2ind is not None:
for index, val in enumerate(pkl_element):
pkl_element[index] = lab2ind[val]
tensor = torch.LongTensor(pkl_element)
type_ok = True
if not (type_ok):
err_msg = (
"The pkl file %s can only contain list of integers, "
"floats, or strings. Got %s"
) % (file, type(pkl_element[0]))
raise ValueError(err_msg)
else:
tensor = pkl_element
tensor_type = tensor.dtype
# Conversion to 32 bit (if needed)
if tensor_type == "float64":
tensor = tensor.astype("float32")
if tensor_type == "int64":
tensor = tensor.astype("int32")
return tensor
| 10,629 | 32.533123 | 79 | py |
speechbrain | speechbrain-main/speechbrain/dataio/dataio.py | """
Data reading and writing.
Authors
* Mirco Ravanelli 2020
* Aku Rouhe 2020
* Ju-Chieh Chou 2020
* Samuele Cornell 2020
* Abdel HEBA 2020
* Gaelle Laperriere 2021
* Sahar Ghannay 2021
* Sylvain de Langen 2022
"""
import os
import torch
import logging
import numpy as np
import pickle
import hashlib
import csv
import time
import torchaudio
import json
import re
from speechbrain.utils.torch_audio_backend import check_torchaudio_backend
check_torchaudio_backend()
logger = logging.getLogger(__name__)
def load_data_json(json_path, replacements={}):
"""Loads JSON and recursively formats string values.
Arguments
----------
json_path : str
        Path to the JSON file.
replacements : dict
(Optional dict), e.g., {"data_folder": "/home/speechbrain/data"}.
This is used to recursively format all string values in the data.
Returns
-------
dict
JSON data with replacements applied.
Example
-------
>>> json_spec = '''{
... "ex1": {"files": ["{ROOT}/mic1/ex1.wav", "{ROOT}/mic2/ex1.wav"], "id": 1},
... "ex2": {"files": [{"spk1": "{ROOT}/ex2.wav"}, {"spk2": "{ROOT}/ex2.wav"}], "id": 2}
... }
... '''
>>> tmpfile = getfixture('tmpdir') / "test.json"
>>> with open(tmpfile, "w") as fo:
... _ = fo.write(json_spec)
>>> data = load_data_json(tmpfile, {"ROOT": "/home"})
>>> data["ex1"]["files"][0]
'/home/mic1/ex1.wav'
>>> data["ex2"]["files"][1]["spk2"]
'/home/ex2.wav'
"""
with open(json_path, "r") as f:
out_json = json.load(f)
_recursive_format(out_json, replacements)
return out_json
def _recursive_format(data, replacements):
# Data: dict or list, replacements : dict
# Replaces string keys in replacements by their values
# at all levels of data (in str values)
# Works in-place.
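    # For example, with replacements={"ROOT": "/data"} (illustrative values),
    # the string "{ROOT}/utt1.wav" becomes "/data/utt1.wav".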
if isinstance(data, dict):
for key, item in data.items():
if isinstance(item, dict) or isinstance(item, list):
_recursive_format(item, replacements)
elif isinstance(item, str):
data[key] = item.format_map(replacements)
# If not dict, list or str, do nothing
if isinstance(data, list):
for i, item in enumerate(data):
if isinstance(item, dict) or isinstance(item, list):
_recursive_format(item, replacements)
elif isinstance(item, str):
data[i] = item.format_map(replacements)
# If not dict, list or str, do nothing
def load_data_csv(csv_path, replacements={}):
"""Loads CSV and formats string values.
Uses the SpeechBrain legacy CSV data format, where the CSV must have an
'ID' field.
If there is a field called duration, it is interpreted as a float.
The rest of the fields are left as they are (legacy _format and _opts fields
are not used to load the data in any special way).
Bash-like string replacements with $to_replace are supported.
Arguments
----------
csv_path : str
Path to CSV file.
replacements : dict
(Optional dict), e.g., {"data_folder": "/home/speechbrain/data"}
This is used to recursively format all string values in the data.
Returns
-------
dict
CSV data with replacements applied.
Example
-------
>>> csv_spec = '''ID,duration,wav_path
... utt1,1.45,$data_folder/utt1.wav
... utt2,2.0,$data_folder/utt2.wav
... '''
>>> tmpfile = getfixture("tmpdir") / "test.csv"
>>> with open(tmpfile, "w") as fo:
... _ = fo.write(csv_spec)
>>> data = load_data_csv(tmpfile, {"data_folder": "/home"})
>>> data["utt1"]["wav_path"]
'/home/utt1.wav'
"""
with open(csv_path, newline="") as csvfile:
result = {}
reader = csv.DictReader(csvfile, skipinitialspace=True)
variable_finder = re.compile(r"\$([\w.]+)")
for row in reader:
# ID:
try:
data_id = row["ID"]
del row["ID"] # This is used as a key in result, instead.
except KeyError:
raise KeyError(
"CSV has to have an 'ID' field, with unique ids"
" for all data points"
)
if data_id in result:
raise ValueError(f"Duplicate id: {data_id}")
# Replacements:
for key, value in row.items():
try:
row[key] = variable_finder.sub(
lambda match: str(replacements[match[1]]), value
)
except KeyError:
raise KeyError(
f"The item {value} requires replacements "
"which were not supplied."
)
# Duration:
if "duration" in row:
row["duration"] = float(row["duration"])
result[data_id] = row
return result
def read_audio(waveforms_obj):
"""General audio loading, based on a custom notation.
Expected use case is in conjunction with Datasets
specified by JSON.
The parameter may just be a path to a file:
`read_audio("/path/to/wav1.wav")`
Alternatively, you can specify more options in a dict, e.g.:
```
# load a file from sample 8000 through 15999
read_audio({
"file": "/path/to/wav2.wav",
"start": 8000,
"stop": 16000
})
```
Which codecs are supported depends on your torchaudio backend.
Refer to `torchaudio.load` documentation for further details.
Arguments
----------
waveforms_obj : str, dict
Path to audio or dict with the desired configuration.
Keys for the dict variant:
- `"file"` (str): Path to the audio file.
- `"start"` (int, optional): The first sample to load.
If unspecified, load from the very first frame.
- `"stop"` (int, optional): The last sample to load (exclusive).
If unspecified or equal to start, load from `start` to the end.
Will not fail if `stop` is past the sample count of the file and will
return less frames.
Returns
-------
torch.Tensor
1-channel: audio tensor with shape: `(samples, )`.
>=2-channels: audio tensor with shape: `(samples, channels)`.
Example
-------
>>> dummywav = torch.rand(16000)
>>> import os
>>> tmpfile = str(getfixture('tmpdir') / "wave.wav")
>>> write_audio(tmpfile, dummywav, 16000)
>>> asr_example = { "wav": tmpfile, "spk_id": "foo", "words": "foo bar"}
>>> loaded = read_audio(asr_example["wav"])
>>> loaded.allclose(dummywav.squeeze(0),atol=1e-4) # replace with eq with sox_io backend
True
"""
if isinstance(waveforms_obj, str):
audio, _ = torchaudio.load(waveforms_obj)
else:
path = waveforms_obj["file"]
start = waveforms_obj.get("start", 0)
# To match past SB behavior, `start == stop` or omitted `stop` means to
# load all frames from `start` to the file end.
stop = waveforms_obj.get("stop", start)
if start < 0:
raise ValueError(
f"Invalid sample range (start < 0): {start}..{stop}!"
)
if stop < start:
# Could occur if the user tried one of two things:
# - specify a negative value as an attempt to index from the end;
# - specify -1 as an attempt to load up to the last sample.
raise ValueError(
f"Invalid sample range (stop < start): {start}..{stop}!\n"
'Hint: Omit "stop" if you want to read to the end of file.'
)
# Requested to load until a specific frame?
if start != stop:
num_frames = stop - start
audio, fs = torchaudio.load(
path, num_frames=num_frames, frame_offset=start
)
else:
# Load to the end.
audio, fs = torchaudio.load(path, frame_offset=start)
audio = audio.transpose(0, 1)
return audio.squeeze(1)
def read_audio_multichannel(waveforms_obj):
"""General audio loading, based on a custom notation.
Expected use case is in conjunction with Datasets
specified by JSON.
The custom notation:
The annotation can be just a path to a file:
"/path/to/wav1.wav"
Multiple (possibly multi-channel) files can be specified, as long as they
have the same length:
{"files": [
"/path/to/wav1.wav",
"/path/to/wav2.wav"
]
}
Or you can specify a single file more succinctly:
{"files": "/path/to/wav2.wav"}
Offset number samples and stop number samples also can be specified to read
only a segment within the files.
{"files": [
"/path/to/wav1.wav",
"/path/to/wav2.wav"
]
"start": 8000
"stop": 16000
}
Arguments
----------
waveforms_obj : str, dict
Audio reading annotation, see above for format.
Returns
-------
torch.Tensor
        Audio tensor with shape: (samples, num_channels).
Example
-------
>>> dummywav = torch.rand(16000, 2)
>>> import os
>>> tmpfile = str(getfixture('tmpdir') / "wave.wav")
>>> write_audio(tmpfile, dummywav, 16000)
>>> asr_example = { "wav": tmpfile, "spk_id": "foo", "words": "foo bar"}
>>> loaded = read_audio(asr_example["wav"])
>>> loaded.allclose(dummywav.squeeze(0),atol=1e-4) # replace with eq with sox_io backend
True
"""
if isinstance(waveforms_obj, str):
audio, _ = torchaudio.load(waveforms_obj)
return audio.transpose(0, 1)
files = waveforms_obj["files"]
if not isinstance(files, list):
files = [files]
waveforms = []
start = waveforms_obj.get("start", 0)
    # Default stop to start - 1 -> if not specified, num_frames becomes -1,
    # which makes torchaudio load the file until the end
stop = waveforms_obj.get("stop", start - 1)
num_frames = stop - start
for f in files:
audio, fs = torchaudio.load(
f, num_frames=num_frames, frame_offset=start
)
waveforms.append(audio)
out = torch.cat(waveforms, 0)
return out.transpose(0, 1)
def write_audio(filepath, audio, samplerate):
"""Write audio on disk. It is basically a wrapper to support saving
audio signals in the speechbrain format (audio, channels).
Arguments
---------
filepath: path
Path where to save the audio file.
audio : torch.Tensor
Audio file in the expected speechbrain format (signal, channels).
samplerate: int
Sample rate (e.g., 16000).
Example
-------
>>> import os
>>> tmpfile = str(getfixture('tmpdir') / "wave.wav")
>>> dummywav = torch.rand(16000, 2)
>>> write_audio(tmpfile, dummywav, 16000)
>>> loaded = read_audio(tmpfile)
>>> loaded.allclose(dummywav,atol=1e-4) # replace with eq with sox_io backend
True
"""
if len(audio.shape) == 2:
audio = audio.transpose(0, 1)
elif len(audio.shape) == 1:
audio = audio.unsqueeze(0)
torchaudio.save(filepath, audio, samplerate)
def load_pickle(pickle_path):
"""Utility function for loading .pkl pickle files.
Arguments
---------
pickle_path : str
Path to pickle file.
Returns
-------
out : object
Python object loaded from pickle.
"""
with open(pickle_path, "rb") as f:
out = pickle.load(f)
return out
def to_floatTensor(x: (list, tuple, np.ndarray)):
"""
Arguments
---------
x : (list, tuple, np.ndarray)
Input data to be converted to torch float.
Returns
-------
tensor : torch.tensor
Data now in torch.tensor float datatype.
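    Example
    -------
    >>> to_floatTensor([1, 2, 3])
    tensor([1., 2., 3.])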
"""
if isinstance(x, torch.Tensor):
return x.float()
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float()
else:
return torch.tensor(x, dtype=torch.float)
def to_doubleTensor(x: (list, tuple, np.ndarray)):
"""
Arguments
---------
x : (list, tuple, np.ndarray)
Input data to be converted to torch double.
Returns
-------
tensor : torch.tensor
Data now in torch.tensor double datatype.
"""
if isinstance(x, torch.Tensor):
return x.double()
if isinstance(x, np.ndarray):
return torch.from_numpy(x).double()
else:
return torch.tensor(x, dtype=torch.double)
def to_longTensor(x: (list, tuple, np.ndarray)):
"""
Arguments
---------
x : (list, tuple, np.ndarray)
Input data to be converted to torch long.
Returns
-------
tensor : torch.tensor
Data now in torch.tensor long datatype.
"""
if isinstance(x, torch.Tensor):
return x.long()
if isinstance(x, np.ndarray):
return torch.from_numpy(x).long()
else:
return torch.tensor(x, dtype=torch.long)
def convert_index_to_lab(batch, ind2lab):
"""Convert a batch of integer IDs to string labels.
Arguments
---------
batch : list
List of lists, a batch of sequences.
ind2lab : dict
Mapping from integer IDs to labels.
Returns
-------
list
List of lists, same size as batch, with labels from ind2lab.
Example
-------
>>> ind2lab = {1: "h", 2: "e", 3: "l", 4: "o"}
>>> out = convert_index_to_lab([[4,1], [1,2,3,3,4]], ind2lab)
>>> for seq in out:
... print("".join(seq))
oh
hello
"""
return [[ind2lab[int(index)] for index in seq] for seq in batch]
def relative_time_to_absolute(batch, relative_lens, rate):
"""Converts SpeechBrain style relative length to the absolute duration.
Operates on batch level.
Arguments
---------
batch : torch.tensor
Sequences to determine the duration for.
relative_lens : torch.tensor
The relative length of each sequence in batch. The longest sequence in
the batch needs to have relative length 1.0.
rate : float
The rate at which sequence elements occur in real-world time. Sample
rate, if batch is raw wavs (recommended) or 1/frame_shift if batch is
features. This has to have 1/s as the unit.
Returns
    -------
torch.tensor
Duration of each sequence in seconds.
Example
-------
>>> batch = torch.ones(2, 16000)
>>> relative_lens = torch.tensor([3./4., 1.0])
>>> rate = 16000
>>> print(relative_time_to_absolute(batch, relative_lens, rate))
tensor([0.7500, 1.0000])
"""
max_len = batch.shape[1]
durations = torch.round(relative_lens * max_len) / rate
return durations
class IterativeCSVWriter:
"""Write CSV files a line at a time.
Arguments
---------
outstream : file-object
A writeable stream
data_fields : list
List of the optional keys to write. Each key will be expanded to the
SpeechBrain format, producing three fields: key, key_format, key_opts.
Example
-------
>>> import io
>>> f = io.StringIO()
>>> writer = IterativeCSVWriter(f, ["phn"])
>>> print(f.getvalue())
ID,duration,phn,phn_format,phn_opts
>>> writer.write("UTT1",2.5,"sil hh ee ll ll oo sil","string","")
>>> print(f.getvalue())
ID,duration,phn,phn_format,phn_opts
UTT1,2.5,sil hh ee ll ll oo sil,string,
>>> writer.write(ID="UTT2",phn="sil ww oo rr ll dd sil",phn_format="string")
>>> print(f.getvalue())
ID,duration,phn,phn_format,phn_opts
UTT1,2.5,sil hh ee ll ll oo sil,string,
UTT2,,sil ww oo rr ll dd sil,string,
>>> writer.set_default('phn_format', 'string')
>>> writer.write_batch(ID=["UTT3","UTT4"],phn=["ff oo oo", "bb aa rr"])
>>> print(f.getvalue())
ID,duration,phn,phn_format,phn_opts
UTT1,2.5,sil hh ee ll ll oo sil,string,
UTT2,,sil ww oo rr ll dd sil,string,
UTT3,,ff oo oo,string,
UTT4,,bb aa rr,string,
"""
def __init__(self, outstream, data_fields, defaults={}):
self._outstream = outstream
self.fields = ["ID", "duration"] + self._expand_data_fields(data_fields)
self.defaults = defaults
self._outstream.write(",".join(self.fields))
def set_default(self, field, value):
"""Sets a default value for the given CSV field.
Arguments
---------
field : str
A field in the CSV.
value
The default value.
"""
if field not in self.fields:
raise ValueError(f"{field} is not a field in this CSV!")
self.defaults[field] = value
def write(self, *args, **kwargs):
"""Writes one data line into the CSV.
Arguments
---------
*args
Supply every field with a value in positional form OR.
**kwargs
Supply certain fields by key. The ID field is mandatory for all
lines, but others can be left empty.
"""
if args and kwargs:
raise ValueError(
"Use either positional fields or named fields, but not both."
)
if args:
if len(args) != len(self.fields):
raise ValueError("Need consistent fields")
to_write = [str(arg) for arg in args]
if kwargs:
if "ID" not in kwargs:
raise ValueError("I'll need to see some ID")
full_vals = self.defaults.copy()
full_vals.update(kwargs)
to_write = [str(full_vals.get(field, "")) for field in self.fields]
self._outstream.write("\n")
self._outstream.write(",".join(to_write))
def write_batch(self, *args, **kwargs):
"""Writes a batch of lines into the CSV.
Here each argument should be a list with the same length.
Arguments
---------
*args
Supply every field with a value in positional form OR.
**kwargs
Supply certain fields by key. The ID field is mandatory for all
lines, but others can be left empty.
"""
if args and kwargs:
raise ValueError(
"Use either positional fields or named fields, but not both."
)
if args:
if len(args) != len(self.fields):
raise ValueError("Need consistent fields")
for arg_row in zip(*args):
self.write(*arg_row)
if kwargs:
if "ID" not in kwargs:
raise ValueError("I'll need to see some ID")
keys = kwargs.keys()
for value_row in zip(*kwargs.values()):
kwarg_row = dict(zip(keys, value_row))
self.write(**kwarg_row)
@staticmethod
def _expand_data_fields(data_fields):
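        # e.g. ["phn"] -> ["phn", "phn_format", "phn_opts"]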
expanded = []
for data_field in data_fields:
expanded.append(data_field)
expanded.append(data_field + "_format")
expanded.append(data_field + "_opts")
return expanded
def write_txt_file(data, filename, sampling_rate=None):
"""Write data in text format.
Arguments
---------
data : str, list, torch.tensor, numpy.ndarray
The data to write in the text file.
filename : str
Path to file where to write the data.
sampling_rate : None
Not used, just here for interface compatibility.
Returns
-------
None
Example
-------
>>> tmpdir = getfixture('tmpdir')
>>> signal=torch.tensor([1,2,3,4])
>>> write_txt_file(signal, tmpdir / 'example.txt')
"""
del sampling_rate # Not used.
# Check if the path of filename exists
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as fout:
if isinstance(data, torch.Tensor):
data = data.tolist()
if isinstance(data, np.ndarray):
data = data.tolist()
if isinstance(data, list):
for line in data:
print(line, file=fout)
if isinstance(data, str):
print(data, file=fout)
def write_stdout(data, filename=None, sampling_rate=None):
"""Write data to standard output.
Arguments
---------
data : str, list, torch.tensor, numpy.ndarray
The data to write in the text file.
filename : None
Not used, just here for compatibility.
sampling_rate : None
Not used, just here for compatibility.
Returns
-------
None
Example
-------
>>> tmpdir = getfixture('tmpdir')
>>> signal = torch.tensor([[1,2,3,4]])
>>> write_stdout(signal, tmpdir / 'example.txt')
[1, 2, 3, 4]
"""
# Managing Torch.Tensor
if isinstance(data, torch.Tensor):
data = data.tolist()
# Managing np.ndarray
if isinstance(data, np.ndarray):
data = data.tolist()
if isinstance(data, list):
for line in data:
print(line)
if isinstance(data, str):
print(data)
def length_to_mask(length, max_len=None, dtype=None, device=None):
"""Creates a binary mask for each sequence.
Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3
Arguments
---------
length : torch.LongTensor
Containing the length of each sequence in the batch. Must be 1D.
max_len : int
Max length for the mask, also the size of the second dimension.
dtype : torch.dtype, default: None
The dtype of the generated mask.
device: torch.device, default: None
The device to put the mask variable.
Returns
-------
mask : tensor
The binary mask.
Example
-------
>>> length=torch.Tensor([1,2,3])
>>> mask=length_to_mask(length)
>>> mask
tensor([[1., 0., 0.],
[1., 1., 0.],
[1., 1., 1.]])
"""
assert len(length.shape) == 1
if max_len is None:
max_len = length.max().long().item() # using arange to generate mask
mask = torch.arange(
max_len, device=length.device, dtype=length.dtype
).expand(len(length), max_len) < length.unsqueeze(1)
if dtype is None:
dtype = length.dtype
if device is None:
device = length.device
mask = torch.as_tensor(mask, dtype=dtype, device=device)
return mask
def read_kaldi_lab(kaldi_ali, kaldi_lab_opts):
"""Read labels in kaldi format.
Uses kaldi IO.
Arguments
---------
kaldi_ali : str
Path to directory where kaldi alignments are stored.
kaldi_lab_opts : str
A string that contains the options for reading the kaldi alignments.
Returns
-------
lab : dict
A dictionary containing the labels.
Note
----
This depends on kaldi-io-for-python. Install it separately.
See: https://github.com/vesis84/kaldi-io-for-python
Example
-------
This example requires kaldi files.
```
lab_folder = '/home/kaldi/egs/TIMIT/s5/exp/dnn4_pretrain-dbn_dnn_ali'
read_kaldi_lab(lab_folder, 'ali-to-pdf')
```
"""
# EXTRA TOOLS
try:
import kaldi_io
except ImportError:
raise ImportError("Could not import kaldi_io. Install it to use this.")
# Reading the Kaldi labels
lab = {
k: v
for k, v in kaldi_io.read_vec_int_ark(
"gunzip -c "
+ kaldi_ali
+ "/ali*.gz | "
+ kaldi_lab_opts
+ " "
+ kaldi_ali
+ "/final.mdl ark:- ark:-|"
)
}
return lab
def get_md5(file):
"""Get the md5 checksum of an input file.
Arguments
---------
file : str
Path to file for which compute the checksum.
Returns
-------
md5
Checksum for the given filepath.
Example
-------
>>> get_md5('tests/samples/single-mic/example1.wav')
'c482d0081ca35302d30d12f1136c34e5'
"""
# Lets read stuff in 64kb chunks!
BUF_SIZE = 65536
md5 = hashlib.md5()
# Computing md5
with open(file, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
return md5.hexdigest()
def save_md5(files, out_file):
"""Saves the md5 of a list of input files as a pickled dict into a file.
Arguments
---------
files : list
List of input files from which we will compute the md5.
    out_file : str
The path where to store the output pkl file.
Returns
-------
None
    Example
    -------
>>> files = ['tests/samples/single-mic/example1.wav']
>>> tmpdir = getfixture('tmpdir')
>>> save_md5(files, tmpdir / "md5.pkl")
"""
# Initialization of the dictionary
md5_dict = {}
# Computing md5 for all the files in the list
for file in files:
md5_dict[file] = get_md5(file)
# Saving dictionary in pkl format
save_pkl(md5_dict, out_file)
def save_pkl(obj, file):
"""Save an object in pkl format.
Arguments
---------
obj : object
Object to save in pkl format
file : str
Path to the output file
Example
-------
>>> tmpfile = getfixture('tmpdir') / "example.pkl"
>>> save_pkl([1, 2, 3, 4, 5], tmpfile)
>>> load_pkl(tmpfile)
[1, 2, 3, 4, 5]
"""
with open(file, "wb") as f:
pickle.dump(obj, f)
def load_pkl(file):
"""Loads a pkl file.
For an example, see `save_pkl`.
Arguments
---------
file : str
Path to the input pkl file.
Returns
-------
The loaded object.
"""
# Deals with the situation where two processes are trying
# to access the same label dictionary by creating a lock
count = 100
while count > 0:
if os.path.isfile(file + ".lock"):
time.sleep(1)
count -= 1
else:
break
try:
open(file + ".lock", "w").close()
with open(file, "rb") as f:
return pickle.load(f)
finally:
if os.path.isfile(file + ".lock"):
os.remove(file + ".lock")
def prepend_bos_token(label, bos_index):
"""Create labels with <bos> token at the beginning.
Arguments
---------
label : torch.IntTensor
Containing the original labels. Must be of size: [batch_size, max_length].
bos_index : int
The index for <bos> token.
Returns
-------
new_label : tensor
The new label with <bos> at the beginning.
Example
-------
>>> label=torch.LongTensor([[1,0,0], [2,3,0], [4,5,6]])
>>> new_label=prepend_bos_token(label, bos_index=7)
>>> new_label
tensor([[7, 1, 0, 0],
[7, 2, 3, 0],
[7, 4, 5, 6]])
"""
new_label = label.long().clone()
batch_size = label.shape[0]
bos = new_label.new_zeros(batch_size, 1).fill_(bos_index)
new_label = torch.cat([bos, new_label], dim=1)
return new_label
def append_eos_token(label, length, eos_index):
"""Create labels with <eos> token appended.
Arguments
---------
label : torch.IntTensor
Containing the original labels. Must be of size: [batch_size, max_length]
length : torch.LongTensor
Containing the original length of each label sequences. Must be 1D.
eos_index : int
The index for <eos> token.
Returns
-------
new_label : tensor
The new label with <eos> appended.
Example
-------
>>> label=torch.IntTensor([[1,0,0], [2,3,0], [4,5,6]])
>>> length=torch.LongTensor([1,2,3])
>>> new_label=append_eos_token(label, length, eos_index=7)
>>> new_label
tensor([[1, 7, 0, 0],
[2, 3, 7, 0],
[4, 5, 6, 7]], dtype=torch.int32)
"""
new_label = label.int().clone()
batch_size = label.shape[0]
pad = new_label.new_zeros(batch_size, 1)
new_label = torch.cat([new_label, pad], dim=1)
new_label[torch.arange(batch_size), length.long()] = eos_index
return new_label
def merge_char(sequences, space="_"):
"""Merge characters sequences into word sequences.
Arguments
---------
sequences : list
Each item contains a list, and this list contains a character sequence.
space : string
The token represents space. Default: _
Returns
-------
The list contains word sequences for each sentence.
Example
-------
>>> sequences = [["a", "b", "_", "c", "_", "d", "e"], ["e", "f", "g", "_", "h", "i"]]
>>> results = merge_char(sequences)
>>> results
[['ab', 'c', 'de'], ['efg', 'hi']]
"""
results = []
for seq in sequences:
words = "".join(seq).split(space)
results.append(words)
return results
def merge_csvs(data_folder, csv_lst, merged_csv):
"""Merging several csv files into one file.
Arguments
---------
data_folder : string
The folder to store csv files to be merged and after merging.
csv_lst : list
Filenames of csv file to be merged.
merged_csv : string
The filename to write the merged csv file.
Example
-------
>>> tmpdir = getfixture('tmpdir')
>>> os.symlink(os.path.realpath("tests/samples/annotation/speech.csv"), tmpdir / "speech.csv")
>>> merge_csvs(tmpdir,
... ["speech.csv", "speech.csv"],
... "test_csv_merge.csv")
"""
write_path = os.path.join(data_folder, merged_csv)
    if os.path.isfile(write_path):
        logger.info("Skipping merging. Completed in previous run.")
        return
with open(os.path.join(data_folder, csv_lst[0])) as f:
header = f.readline()
lines = []
for csv_file in csv_lst:
with open(os.path.join(data_folder, csv_file)) as f:
for i, line in enumerate(f):
if i == 0:
# Checking header
if line != header:
raise ValueError(
"Different header for " f"{csv_lst[0]} and {csv}."
)
continue
lines.append(line)
with open(write_path, "w") as f:
f.write(header)
for line in lines:
f.write(line)
logger.info(f"{write_path} is created.")
def split_word(sequences, space="_"):
"""Split word sequences into character sequences.
Arguments
---------
sequences: list
Each item contains a list, and this list contains a words sequence.
space: string
The token represents space. Default: _
Returns
-------
    The list contains character sequences for each sentence.
Example
-------
>>> sequences = [['ab', 'c', 'de'], ['efg', 'hi']]
>>> results = split_word(sequences)
>>> results
[['a', 'b', '_', 'c', '_', 'd', 'e'], ['e', 'f', 'g', '_', 'h', 'i']]
"""
results = []
for seq in sequences:
chars = list(space.join(seq))
results.append(chars)
return results
def extract_concepts_values(sequences, keep_values, tag_in, tag_out, space):
"""keep the semantic concepts and values for evaluation.
Arguments
---------
sequences: list
Each item contains a list, and this list contains a character sequence.
keep_values: bool
        If True, keep the values; otherwise, drop them.
tag_in: char
Indicates the start of the concept.
tag_out: char
Indicates the end of the concept.
space: string
The token represents space. Default: _
Returns
-------
The list contains concept and value sequences for each sentence.
Example
-------
>>> sequences = [['<reponse>','_','n','o','_','>','_','<localisation-ville>','_','L','e','_','M','a','n','s','_','>'], ['<reponse>','_','s','i','_','>'],['v','a','_','b','e','n','e']]
>>> results = extract_concepts_values(sequences, True, '<', '>', '_')
>>> results
[['<reponse> no', '<localisation-ville> Le Mans'], ['<reponse> si'], ['']]
"""
results = []
for sequence in sequences:
# ['<reponse>_no_>_<localisation-ville>_Le_Mans_>']
sequence = "".join(sequence)
# ['<reponse>','no','>','<localisation-ville>','Le','Mans,'>']
sequence = sequence.split(space)
processed_sequence = []
value = (
[]
) # If previous sequence value never used because never had a tag_out
kept = "" # If previous sequence kept never used because never had a tag_out
concept_open = False
for word in sequence:
if re.match(tag_in, word):
# If not close tag but new tag open
if concept_open and keep_values:
if len(value) != 0:
kept += " " + " ".join(value)
concept_open = False
processed_sequence.append(kept)
kept = word # 1st loop: '<reponse>'
value = [] # Concept's value
concept_open = True # Trying to catch the concept's value
# If we want the CER
if not keep_values:
processed_sequence.append(kept) # Add the kept concept
# If we have a tag_out, had a concept, and want the values for CVER
elif re.match(tag_out, word) and concept_open and keep_values:
# If we have a value
if len(value) != 0:
kept += " " + " ".join(
value
) # 1st loop: '<response>' + ' ' + 'no'
concept_open = False # Wait for a new tag_in to pursue
processed_sequence.append(kept) # Add the kept concept + value
elif concept_open:
value.append(word) # 1st loop: 'no'
# If not close tag but end sequence
if concept_open and keep_values:
if len(value) != 0:
kept += " " + " ".join(value)
concept_open = False
processed_sequence.append(kept)
if len(processed_sequence) == 0:
processed_sequence.append("")
results.append(processed_sequence)
return results
| 34,200 | 28.560069 | 187 | py |