from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Bundles a BLIP image processor and a BERT tokenizer behind a single `__call__`."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
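A minimal usage sketch for the processor above, assuming it is the `BlipProcessor` shipped with transformers (the checkpoint name is one public example; the image path is hypothetical):

```python
# Hedged usage sketch: assumes transformers' BlipProcessor and a public BLIP checkpoint.
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open("photo.jpg").convert("RGB")  # any local RGB image (hypothetical path)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
# pixel_values comes from the image processor; input_ids/attention_mask from the tokenizer
print(sorted(inputs.keys()))
```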
import re


def find_complement(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
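Two sample calls for the complement function (each base maps to its Watson-Crick partner):

```python
assert find_complement("ATCG") == "TAGC"
assert find_complement("GTA") == "CAT"
```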
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(back_tokens)
        self.assertEqual(text, "This is a test")
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : List[Any] = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""facebook/m2m100_418M""", revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""", )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
    tgt_text = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 12_8006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 12_8022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 12_8076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 12_8063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[12_8022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 12_8006,
            },
        )
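Beyond the tests, a minimal translation sketch against the real checkpoint the tests reference (assuming network access to the Hub and the standard transformers class names):

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
# M2M100 expects the target language code as the forced first generated token
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```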
from __future__ import annotations

END = "#"


class Trie:
    """A simple prefix tree supporting word insertion and prefix search."""

    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string) -> tuple:
    """Return every inserted word that starts with ``string``."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
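Running the demo completes the prefix "de"; each completion carries a trailing space contributed by the `END` marker:

```python
print(autocomplete_using_trie("de"))
# -> ('depart ', 'detergent ', 'deer ', 'deal '); "daring" and "dog" are excluded
```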
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class for Deformable DETR models."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
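A small sketch of instantiating the config above, assuming it is transformers' `DeformableDetrConfig` (which these fields match); note how `hidden_size` resolves to `d_model`:

```python
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=300, with_box_refine=False)
assert config.hidden_size == config.d_model  # aliased via attribute_map / property
print(config.to_dict()["model_type"])  # "deformable_detr"
```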
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """Quantize a model's linear layers with bitsandbytes, then load weights and dispatch/offload it."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    """Infer (or validate) a device map for the quantized model."""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` outside `modules_to_not_convert` with a bitsandbytes quantized linear layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Recursive helper for `replace_with_bnb_layers`; returns the model and whether anything was replaced."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Return module names (e.g. tied weights, lm_head) that should stay in their original dtype."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    """Return the device of the first parameter of ``parameter``."""
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize ``param`` on device 0, then offload it (and its 8-bit SCB statistics, if any) to disk."""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    """Pop ``old`` from the state dict and re-insert its value under ``new``."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move backbone weights from the original `backbone.0.body` prefix to our naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split the fused in_proj q/k/v matrices into separate q_proj/k_proj/v_proj entries."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize so the longer side is 800 px (detection) or 1000 px (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert to a tensor and normalize with ImageNet mean/std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into our Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]]
        )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]]
        )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
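The script is normally driven from the command line; an equivalent programmatic call with the default checkpoint looks like this (the output folder name is illustrative):

```python
convert_table_transformer_checkpoint(
    "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
    "table-transformer-detection",  # hypothetical dump folder
    False,  # push_to_hub
)
```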
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    """Entry point for `transformers-cli`: register every subcommand, then dispatch to the chosen one."""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Cosine similarity between two batches of embeddings (rows are L2-normalized first)."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_blenderbot"""] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_blenderbot"""] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_blenderbot"""] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
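With the lazy structure above, the heavy submodules are only imported when an attribute is first accessed, while type checkers still see the real imports through the `TYPE_CHECKING` branch. For example:

```python
# Nothing from modeling_blenderbot is imported until this name is accessed:
from transformers import BlenderbotForConditionalGeneration

model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
```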
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Sum of absolute coordinate differences between two points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError/ValueError unless ``point`` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as ``manhattan_distance``, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
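Sample values for the two equivalent implementations:

```python
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1, 3, 4], [5, 6, 7]) == 10.0
```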
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations that fit into ``value`` (assumes ascending input)."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 662 | 1 |
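The block above is greedy change-making. Worth noting: the greedy strategy is optimal for canonical coin systems such as the Indian denominations used here, but not for arbitrary ones. A clean sketch with names of my own choosing, plus a worked case:

def find_minimum_change(denominations: list[int], value: int) -> list[int]:
    # Greedy change-making: repeatedly take the largest denomination that
    # still fits. Optimal for canonical systems like [1, 2, 5, ..., 2000];
    # suboptimal in general (e.g. denominations [1, 3, 4], value 6:
    # greedy gives 4+1+1, the optimum is 3+3).
    total = int(value)
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total >= denomination:
            total -= denomination
            answer.append(denomination)
    return answer

assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]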
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "sew-d"
def __init__( self, lowerCamelCase__=32, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__=2, lowerCamelCase__=512, lowerCamelCase__=256, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=("p2c", "c2p"), lowerCamelCase__="layer_norm", lowerCamelCase__="gelu_python", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=0.0, lowerCamelCase__=0.1, lowerCamelCase__=0.02, lowerCamelCase__=1e-7, lowerCamelCase__=1e-5, lowerCamelCase__="group", lowerCamelCase__="gelu", lowerCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCamelCase__=False, lowerCamelCase__=128, lowerCamelCase__=16, lowerCamelCase__=True, lowerCamelCase__=0.05, lowerCamelCase__=10, lowerCamelCase__=2, lowerCamelCase__=0.0, lowerCamelCase__=10, lowerCamelCase__=0, lowerCamelCase__="mean", lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=256, lowerCamelCase__=0, lowerCamelCase__=1, lowerCamelCase__=2, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__, pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__ )
A : Union[str, Any] = hidden_size
A : str = feat_extract_norm
A : Any = feat_extract_activation
A : Optional[Any] = list(lowerCamelCase__ )
A : str = list(lowerCamelCase__ )
A : Any = list(lowerCamelCase__ )
A : Optional[int] = conv_bias
A : Any = num_conv_pos_embeddings
A : Union[str, Any] = num_conv_pos_embedding_groups
A : Any = len(self.conv_dim )
A : int = num_hidden_layers
A : Union[str, Any] = intermediate_size
A : Optional[int] = squeeze_factor
A : Tuple = max_position_embeddings
A : Dict = position_buckets
A : str = share_att_key
A : Union[str, Any] = relative_attention
A : Dict = norm_rel_ebd
A : List[str] = list(lowerCamelCase__ )
A : Optional[Any] = hidden_act
A : str = num_attention_heads
A : Optional[int] = hidden_dropout
A : Optional[Any] = attention_dropout
A : Optional[Any] = activation_dropout
A : List[str] = feat_proj_dropout
A : Dict = final_dropout
A : List[Any] = layer_norm_eps
A : Tuple = feature_layer_norm_eps
A : List[Any] = initializer_range
A : List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A : Optional[Any] = apply_spec_augment
A : Optional[int] = mask_time_prob
A : List[Any] = mask_time_length
A : List[str] = mask_time_min_masks
A : List[str] = mask_feature_prob
A : Tuple = mask_feature_length
A : Optional[int] = mask_feature_min_masks
# ctc loss
A : Optional[int] = ctc_loss_reduction
A : Any = ctc_zero_infinity
# sequence classification
A : Union[str, Any] = use_weighted_layer_sum
A : Any = classifier_proj_size
@property
def _lowerCAmelCase ( self ):
return functools.reduce(operator.mul, self.conv_stride, 1 )
| 662 |
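The closing property of the config above multiplies all convolutional strides together, which gives the feature extractor's overall downsampling factor. A small sketch of what that number means with the default strides (the 16 kHz sampling-rate interpretation is my assumption, based on how SEW-style speech models are typically used):

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults from the config
downsampling = functools.reduce(operator.mul, conv_stride, 1)
print(downsampling)  # 320 -> one hidden-state frame per 320 input samples (20 ms at 16 kHz)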
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This contrasts with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which still leads
# to ~3MB in total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 662 | 1 |
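A sketch of how such a checkpoint is typically consumed in tests afterwards. The repo id "stas/tiny-wmt19-en-ru" comes from the script's own comments; the generation arguments are illustrative and network access is needed for from_pretrained:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tokenizer(["Making tiny model"], return_tensors="pt")
generated = model.generate(**batch, max_new_tokens=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))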
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
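The `__init__` above follows the lazy-import convention: heavy submodules are only imported when one of their attributes is first accessed (or eagerly under TYPE_CHECKING). Below is a simplified, runnable sketch of the idea, not transformers' actual _LazyModule implementation; the demo maps attributes to stdlib modules as stand-ins:

import importlib
import types

class LazyModule(types.ModuleType):
    # Nothing is imported up front; the first attribute access imports the
    # owning module, then the result is cached on the instance.
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

lazy = LazyModule("demo", {"sqrt": "math", "OrderedDict": "collections"})
print(lazy.sqrt(9.0))  # "math" is imported only at this point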
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 1 |
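The conversion script ends with a numerical sanity check. A generic, standalone form of that pattern, with the tolerance mirroring the script's atol=1e-3 (function name is mine):

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # Run both models on the same input upstream, then compare elementwise.
    print(f"max_absolute_diff = {torch.max(torch.abs(ours - theirs)).item()}")
    return torch.allclose(ours, theirs, atol=atol)

assert outputs_match(torch.ones(2, 3), torch.ones(2, 3) + 1e-5)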
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Tuple = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = "mobilenet_v1"
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=224, lowerCamelCase__=1.0, lowerCamelCase__=8, lowerCamelCase__="relu6", lowerCamelCase__=True, lowerCamelCase__=0.999, lowerCamelCase__=0.02, lowerCamelCase__=0.001, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
A : Union[str, Any] = num_channels
A : Tuple = image_size
A : List[str] = depth_multiplier
A : Tuple = min_depth
A : Union[str, Any] = hidden_act
A : Dict = tf_padding
A : Optional[int] = classifier_dropout_prob
A : Union[str, Any] = initializer_range
A : str = layer_norm_eps
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = version.parse("1.11" )
@property
def _lowerCAmelCase ( self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _lowerCAmelCase ( self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _lowerCAmelCase ( self ):
return 1e-4
| 662 |
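An illustration of the convention that `depth_multiplier` and `min_depth` usually follow in MobileNetV1-style models: each layer's channel count is scaled by the multiplier and floored at min_depth. The exact rounding in the modeling code may differ; this is an assumption for illustration only:

def scaled_channels(channels: int, depth_multiplier: float, min_depth: int = 8) -> int:
    # Scale the channel count, never dropping below min_depth.
    return max(min_depth, int(channels * depth_multiplier))

assert scaled_channels(64, 1.0) == 64
assert scaled_channels(64, 0.25) == 16
assert scaled_channels(16, 0.25) == 8  # floored by min_depth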
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
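A hedged sketch of the usage pattern those tests exercise. The tokenizer checkpoint name is taken from the test setup above, and from_pretrained needs network access:

import numpy as np
from PIL import Image
from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

processor = BlipProcessor(
    BlipImageProcessor(),
    BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
)
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']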
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 |
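The property those tests rely on: for a Hermitian matrix A, the Rayleigh quotient v*Av / v*v is real and lies between A's smallest and largest eigenvalues. A quick numerical check on a random Hermitian matrix:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
a = (a + a.conjugate().T) / 2                      # symmetrize -> Hermitian
v = rng.standard_normal((4, 1)) + 1j * rng.standard_normal((4, 1))

rq = ((v.conjugate().T @ a @ v) / (v.conjugate().T @ v)).item()
eigenvalues = np.linalg.eigvalsh(a)
assert abs(rq.imag) < 1e-12                        # quotient is real
assert eigenvalues.min() - 1e-9 <= rq.real <= eigenvalues.max() + 1e-9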
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ = 16, lowerCamelCase__ = 88, lowerCamelCase__ = None, lowerCamelCase__ = 1, lowerCamelCase__ = 0.0, lowerCamelCase__ = 32, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "geglu", lowerCamelCase__ = None, ):
super().__init__()
A : Optional[int] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCamelCase__, attention_head_dim=lowerCamelCase__, in_channels=lowerCamelCase__, num_layers=lowerCamelCase__, dropout=lowerCamelCase__, norm_num_groups=lowerCamelCase__, cross_attention_dim=lowerCamelCase__, attention_bias=lowerCamelCase__, sample_size=lowerCamelCase__, num_vector_embeds=lowerCamelCase__, activation_fn=lowerCamelCase__, num_embeds_ada_norm=lowerCamelCase__, )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A : Optional[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A : str = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A : str = [1, 0]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__ = True, ):
A : Optional[Any] = hidden_states
A : int = []
A : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A : Tuple = self.transformer_index_for_condition[i]
A : Union[str, Any] = self.transformers[transformer_index](
lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, timestep=lowerCamelCase__, cross_attention_kwargs=lowerCamelCase__, return_dict=lowerCamelCase__, )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A : Optional[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCamelCase__ )
| 662 |
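A shape-level sketch of the combination step in the forward pass above: each transformer contributes a residual (its output minus the shared input), the residuals are blended with mix_ratio, and the input is added back. Shapes and values here are hypothetical stand-ins:

import numpy as np

batch, tokens, features = 2, 4, 8
input_states = np.random.rand(batch, tokens, features)
out_a = np.random.rand(batch, tokens, features)  # stand-in for one transformer's output
out_b = np.random.rand(batch, tokens, features)  # stand-in for the other's

mix_ratio = 0.5
blended = (out_a - input_states) * mix_ratio + (out_b - input_states) * (1 - mix_ratio)
output_states = blended + input_states
assert output_states.shape == input_states.shape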
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
from typing import Any
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
A : List[Any] = data
A : List[str] = None
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : Any = None
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.head
while temp is not None:
print(temp.data, end=""" """ )
A : Union[str, Any] = temp.next
print()
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = Node(lowerCamelCase__ )
A : Any = self.head
A : Tuple = new_node
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
A : Tuple = self.head
while node_a is not None and node_a.data != node_data_a:
A : Union[str, Any] = node_a.next
A : Optional[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
A : Dict = node_a.next
if node_a is None or node_a is None:
return
A , A : Dict = node_a.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:str = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 662 |
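A de-obfuscated sketch of the approach above: locate both nodes by value and swap their payloads, which avoids any pointer rewiring and runs in O(n) time:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def swap_nodes(head, data_a, data_b):
    if data_a == data_b:
        return
    node_a, node_b = head, head
    while node_a is not None and node_a.data != data_a:
        node_a = node_a.next
    while node_b is not None and node_b.data != data_b:
        node_b = node_b.next
    if node_a is None or node_b is None:
        return  # one of the values is absent; nothing to do
    node_a.data, node_b.data = node_b.data, node_a.data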
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
A , A : int = np.shape(_lowerCAmelCase )
if rows != columns:
A : Union[str, Any] = (
"""'table' has to be of square shaped array but got a """
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCAmelCase )
A : Union[str, Any] = np.zeros((rows, columns) )
A : Dict = np.zeros((rows, columns) )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A : Any = (table[i][j] - total) / upper[j][j]
A : Union[str, Any] = 1
for j in range(_lowerCAmelCase , _lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
A : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
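The placeholder arguments above hide the loop bounds, so here is a sketch of the Doolittle scheme (unit diagonal on L, no pivoting) that the code appears to implement, checked against numpy. Treat the bounds as my reconstruction rather than a verbatim restoration:

import numpy as np

def lu_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU without pivoting: L has a unit diagonal, U is upper
    # triangular, and L @ U == table when no zero pivot is encountered.
    rows, columns = np.shape(table)
    if rows != columns:
        raise ValueError("'table' has to be a square shaped array")
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(rows):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lu_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)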
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , SCREAMING_SNAKE_CASE__ , )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Any = RobertaConfig
__lowerCamelCase : str = "roberta"
def __init__( self, lowerCamelCase__ ):
super().__init__(lowerCamelCase__ )
A : Optional[Any] = RobertaEmbeddings(lowerCamelCase__ )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Any = RobertaConfig
__lowerCamelCase : Dict = "roberta"
def __init__( self, lowerCamelCase__ ):
super().__init__(lowerCamelCase__ )
A : Union[str, Any] = config.num_labels
A : List[Any] = config.num_hidden_layers
A : List[str] = DeeRobertaModel(lowerCamelCase__ )
A : int = nn.Dropout(config.hidden_dropout_prob )
A : int = nn.Linear(config.hidden_size, self.config.num_labels )
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=-1, lowerCamelCase__=False, ):
A : List[Any] = self.num_layers
try:
A : List[str] = self.roberta(
lowerCamelCase__, attention_mask=lowerCamelCase__, token_type_ids=lowerCamelCase__, position_ids=lowerCamelCase__, head_mask=lowerCamelCase__, inputs_embeds=lowerCamelCase__, )
A : Optional[int] = outputs[1]
A : List[Any] = self.dropout(lowerCamelCase__ )
A : Optional[int] = self.classifier(lowerCamelCase__ )
A : str = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A : Any = e.message
A : Optional[int] = e.exit_layer
A : List[Any] = outputs[0]
if not self.training:
A : int = entropy(lowerCamelCase__ )
A : int = []
A : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A : List[str] = MSELoss()
A : Dict = loss_fct(logits.view(-1 ), labels.view(-1 ) )
else:
A : List[Any] = CrossEntropyLoss()
A : Union[str, Any] = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
# work with highway exits
A : str = []
for highway_exit in outputs[-1]:
A : int = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A : Optional[Any] = MSELoss()
A : Optional[int] = loss_fct(highway_logits.view(-1 ), labels.view(-1 ) )
else:
A : Optional[int] = CrossEntropyLoss()
A : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels ), labels.view(-1 ) )
highway_losses.append(lowerCamelCase__ )
if train_highway:
A : Tuple = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A : int = (loss,) + outputs
if not self.training:
A : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 662 |
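The early-exit decision above hinges on the entropy of each highway classifier's logits: low entropy means the intermediate classifier is confident enough to stop. A minimal sketch of that gate (the threshold value is hypothetical; the actual one is chosen at inference time):

import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution over classes.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

logits = torch.tensor([[4.0, 0.1, 0.1], [1.0, 1.0, 1.1]])
threshold = 0.5  # hypothetical confidence threshold
print(entropy(logits) < threshold)  # tensor([ True, False]) -> only the first example exits early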
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ):
A : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
A : Any = math.ceil(val / multiple ) * multiple
return x
A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
A , A : List[Any] = get_image_size(_lowerCAmelCase )
A , A : List[Any] = output_size
# determine new height and width
A : Optional[int] = output_height / input_height
A : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A : Any = scale_width
else:
# fit height
A : int = scale_height
A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 | 1 |
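A worked example of the resize rule implemented above: pick the scale factor closest to 1 so the image is scaled as little as possible, then snap both sides to a multiple of `ensure_multiple_of`. The input size is hypothetical:

import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # Round to the nearest multiple, falling back to floor/ceil so the
    # result stays inside [min_val, max_val].
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

input_h, input_w, multiple = 480, 640, 32
scale_h, scale_w = 384 / input_h, 384 / input_w
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
print(constraint_to_multiple_of(scale * input_h, multiple),
      constraint_to_multiple_of(scale * input_w, multiple))  # 384 512 -- aspect ratio kept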
from ...configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "tapas"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=1024, lowerCamelCase__=[3, 256, 256, 2, 256, 256, 10], lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=0, lowerCamelCase__=10.0, lowerCamelCase__=0, lowerCamelCase__=1.0, lowerCamelCase__=None, lowerCamelCase__=1.0, lowerCamelCase__=False, lowerCamelCase__=None, lowerCamelCase__=1.0, lowerCamelCase__=1.0, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__="ratio", lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=64, lowerCamelCase__=32, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, **lowerCamelCase__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
A : Dict = vocab_size
A : Optional[Any] = hidden_size
A : int = num_hidden_layers
A : Tuple = num_attention_heads
A : Tuple = hidden_act
A : Optional[Any] = intermediate_size
A : int = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : Any = max_position_embeddings
A : Dict = type_vocab_sizes
A : Dict = initializer_range
A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
A : Optional[Any] = positive_label_weight
A : Optional[int] = num_aggregation_labels
A : List[str] = aggregation_loss_weight
A : Tuple = use_answer_as_supervision
A : Tuple = answer_loss_importance
A : Any = use_normalized_answer_loss
A : Dict = huber_loss_delta
A : List[str] = temperature
A : int = aggregation_temperature
A : Union[str, Any] = use_gumbel_for_cells
A : Union[str, Any] = use_gumbel_for_aggregation
A : Tuple = average_approximation_function
A : int = cell_selection_preference
A : List[str] = answer_loss_cutoff
A : List[Any] = max_num_rows
A : Any = max_num_columns
A : Union[str, Any] = average_logits_per_cell
A : Dict = select_one_column
A : Optional[Any] = allow_empty_column_selection
A : int = init_cell_selection_weights_to_zero
A : int = reset_position_index_per_cell
A : Tuple = disable_per_token_loss
# Aggregation hyperparameters
A : Tuple = aggregation_labels
A : str = no_aggregation_label_index
if isinstance(self.aggregation_labels, lowerCamelCase__ ):
A : Union[str, Any] = {int(lowerCamelCase__ ): v for k, v in aggregation_labels.items()}
| 662 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
# the input arrives as a comma-separated string, so split it into a list of tokens
A : List[Any] = arr.split(""",""" )
def _lowerCAmelCase ( self ):
A : int = [int(self.array[0] )] * len(self.array )
A : Optional[Any] = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
A : Union[str, Any] = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
A : Dict = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array()
print(("""the results is:""", re))
| 662 | 1 |
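`solve_sub_array` above is Kadane's maximum-subarray dynamic program: `sum_value[i]` holds the best sum of a subarray ending at index i, and `rear[i]` tracks the best seen anywhere so far. A compact O(1)-space sketch of the same recurrence (names are mine):

def max_subarray_sum(numbers: list[int]) -> int:
    # Kadane's algorithm: best_ending_here = best sum of a subarray ending
    # at the current index; answer = maximum over all indices. O(n) time.
    best_ending_here = answer = numbers[0]
    for value in numbers[1:]:
        best_ending_here = max(best_ending_here + value, value)
        answer = max(answer, best_ending_here)
    return answer

assert max_subarray_sum([1, -3, 4, -2, -1, 6]) == 7  # subarray [4, -2, -1, 6]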
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
| 662 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_:List[str] = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and attention mask
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 | 1 |
from __future__ import annotations
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]:
"""simple docstring"""
if b == 0:
return (1, 0)
((A) , (A)) : int = extended_euclid(_lowerCAmelCase , a % b )
A : Tuple = a // b
return (y, x - k * y)
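# worked example: for (a, b) = (10, 6) this returns (-1, 2), since 10 * (-1) + 6 * 2 == 2 == gcd(10, 6)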
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
((A) , (A)) : int = extended_euclid(_lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = na * na
A : Optional[int] = ra * x * na + ra * y * na
return (n % m + m) % m
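# worked example: combining x ≡ 1 (mod 5) with x ≡ 2 (mod 7) yields x = 16, the unique solution modulo 35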
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
((A) , (A)) : Dict = extended_euclid(_lowerCAmelCase , _lowerCAmelCase )
if b < 0:
A : Tuple = (b % n + n) % n
return b
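# worked example: the inverse of 2 modulo 5 is 3, since 2 * 3 ≡ 1 (mod 5)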
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A , A : Any = invert_modulo(_lowerCAmelCase , _lowerCAmelCase ), invert_modulo(_lowerCAmelCase , _lowerCAmelCase )
A : int = na * na
A : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
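# e.g. shortest_edge=224 with the default crop_pct of 224/256 gives int(224 / 0.875) = 256, so the short side is resized to 256 before the 224x224 center crop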
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 662 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Any = "data2vec-text"
def __init__( self, lowerCamelCase__=3_0522, lowerCamelCase__=768, lowerCamelCase__=12, lowerCamelCase__=12, lowerCamelCase__=3072, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=1e-12, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__="absolute", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : int = vocab_size
A : Union[str, Any] = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Dict = num_attention_heads
A : str = hidden_act
A : str = intermediate_size
A : Optional[Any] = hidden_dropout_prob
A : str = attention_probs_dropout_prob
A : Any = max_position_embeddings
A : Optional[Any] = type_vocab_size
A : List[Any] = initializer_range
A : Optional[Any] = layer_norm_eps
A : str = position_embedding_type
A : Tuple = use_cache
A : int = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
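# the fused qkv weight has shape (3 * hidden_size, hidden_size); its three row-blocks are the query, key and value projections, sliced out below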
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(k ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 1 |
import math
def __UpperCamelCase ( initial_intensity , angle ) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
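# worked example: initial_intensity=100.0 at angle=60 transmits 100.0 * cos(60°)**2 = 25.0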
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
'''simple docstring'''
__lowerCamelCase : Optional[datasets.Features] = None
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__lowerCamelCase : int = PandasConfig
def _lowerCAmelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase__, (str, list, tuple) ):
A : Union[str, Any] = data_files
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A : List[Any] = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files} )]
A : str = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A : Union[str, Any] = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase__, gen_kwargs={"""files""": files} ) )
return splits
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A : List[str] = table_cast(lowerCamelCase__, self.config.features.arrow_schema )
return pa_table
def _lowerCAmelCase ( self, lowerCamelCase__ ):
for i, file in enumerate(itertools.chain.from_iterable(lowerCamelCase__ ) ):
with open(lowerCamelCase__, """rb""" ) as f:
A : Optional[int] = pa.Table.from_pandas(pd.read_pickle(lowerCamelCase__ ) )
yield i, self._cast_table(lowerCamelCase__ )
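# Note: presumably the packaged backend behind load_dataset("pandas", data_files=...); each data file is assumed to be a pickled DataFrame readable by pd.read_pickle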
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
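# Note: this is the reader one would expect behind load_dataset("text", data_files=...), which yields one example per line of the input files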
| 662 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A : List[str] = nn.Linear(3, 4 )
A : Dict = nn.BatchNormad(4 )
A : Optional[int] = nn.Linear(4, 5 )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase__, model.state_dict() )
A : Optional[int] = os.path.join(lowerCamelCase__, """index.json""" )
self.assertTrue(os.path.isfile(lowerCamelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A : List[str] = os.path.join(lowerCamelCase__, f'''{key}.dat''' )
self.assertTrue(os.path.isfile(lowerCamelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def _lowerCAmelCase ( self ):
A : int = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A : int = torch.randn(2, 3, dtype=lowerCamelCase__ )
with TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = offload_weight(lowerCamelCase__, """weight""", lowerCamelCase__, {} )
A : Dict = os.path.join(lowerCamelCase__, """weight.dat""" )
self.assertTrue(os.path.isfile(lowerCamelCase__ ) )
self.assertDictEqual(lowerCamelCase__, {"""weight""": {"""shape""": [2, 3], """dtype""": str(lowerCamelCase__ ).split(""".""" )[1]}} )
A : Optional[Any] = load_offloaded_weight(lowerCamelCase__, index["""weight"""] )
self.assertTrue(torch.equal(lowerCamelCase__, lowerCamelCase__ ) )
def _lowerCAmelCase ( self ):
A : Dict = ModelForTest()
A : Any = model.state_dict()
A : List[str] = {k: v for k, v in state_dict.items() if """linear2""" not in k}
A : Dict = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase__, lowerCamelCase__ )
A : int = OffloadedWeightsLoader(state_dict=lowerCamelCase__, save_folder=lowerCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase__ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase__, weight_map[key] ) )
A : Optional[Any] = {k: v for k, v in state_dict.items() if """weight""" in k}
A : Union[str, Any] = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = OffloadedWeightsLoader(state_dict=lowerCamelCase__, save_folder=lowerCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase__ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase__, weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCamelCase__, lowerCamelCase__ )
# Duplicates are removed
A : int = OffloadedWeightsLoader(state_dict=lowerCamelCase__, save_folder=lowerCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCamelCase__ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCamelCase__, weight_map[key] ) )
def _lowerCAmelCase ( self ):
A : Tuple = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
A : List[Any] = extract_submodules_state_dict(lowerCamelCase__, ["""a.1""", """a.2"""] )
self.assertDictEqual(lowerCamelCase__, {"""a.1""": 0, """a.2""": 2} )
A : Any = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
A : List[Any] = extract_submodules_state_dict(lowerCamelCase__, ["""a.1""", """a.2"""] )
self.assertDictEqual(lowerCamelCase__, {"""a.1.a""": 0, """a.2.a""": 2} )
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_:Union[str, Any] = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=8 ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
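# e.g. height=width=768 with scale_factor=8: 768 // 8**2 = 12, so this returns (96, 96), the matching latent size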
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
super().__init__()
self.register_modules(
unet=lowerCamelCase__, scheduler=lowerCamelCase__, movq=lowerCamelCase__, )
A : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if latents is None:
A : int = randn_tensor(lowerCamelCase__, generator=lowerCamelCase__, device=lowerCamelCase__, dtype=lowerCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A : Dict = latents.to(lowerCamelCase__ )
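# scale the initial noise by the standard deviation the scheduler expects (init_noise_sigma)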
A : Tuple = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A : Any = torch.device(f'''cuda:{gpu_id}''' )
A : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
A : List[str] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""", silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
A , A : List[Any] = cpu_offload_with_hook(lowerCamelCase__, lowerCamelCase__, prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
A : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self ):
if not hasattr(self.unet, """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__, """_hf_hook""" )
and hasattr(module._hf_hook, """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 512, lowerCamelCase__ = 512, lowerCamelCase__ = 100, lowerCamelCase__ = 4.0, lowerCamelCase__ = 1, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "pil", lowerCamelCase__ = True, ):
A : Optional[Any] = self._execution_device
A : Union[str, Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = torch.cat(lowerCamelCase__, dim=0 )
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = torch.cat(lowerCamelCase__, dim=0 )
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : int = torch.cat(lowerCamelCase__, dim=0 )
A : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A : str = image_embeds.repeat_interleave(lowerCamelCase__, dim=0 )
A : List[str] = negative_image_embeds.repeat_interleave(lowerCamelCase__, dim=0 )
A : int = hint.repeat_interleave(lowerCamelCase__, dim=0 )
A : Optional[Any] = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=lowerCamelCase__ )
A : Optional[int] = torch.cat([hint, hint], dim=0 ).to(dtype=self.unet.dtype, device=lowerCamelCase__ )
self.scheduler.set_timesteps(lowerCamelCase__, device=lowerCamelCase__ )
A : Tuple = self.scheduler.timesteps
A : Any = self.movq.config.latent_channels
A , A : List[Any] = downscale_height_and_width(lowerCamelCase__, lowerCamelCase__, self.movq_scale_factor )
# create initial latent
A : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width), image_embeds.dtype, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, self.scheduler, )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
A : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint}
A : Tuple = self.unet(
sample=lowerCamelCase__, timestep=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, added_cond_kwargs=lowerCamelCase__, return_dict=lowerCamelCase__, )[0]
if do_classifier_free_guidance:
A , A : List[Any] = noise_pred.split(latents.shape[1], dim=1 )
A , A : List[Any] = noise_pred.chunk(2 )
A , A : Tuple = variance_pred.chunk(2 )
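# classifier-free guidance: push the prediction away from the unconditional branch by guidance_scale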
A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A : int = torch.cat([noise_pred, variance_pred_text], dim=1 )
if not (
hasattr(self.scheduler.config, """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A , A : Any = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A : int = self.scheduler.step(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, generator=lowerCamelCase__, )[0]
# post-processing
A : Optional[int] = self.movq.decode(lowerCamelCase__, force_not_quantize=lowerCamelCase__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
A : Tuple = image * 0.5 + 0.5
A : Union[str, Any] = image.clamp(0, 1 )
A : str = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
A : List[str] = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
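# each expansion maps p/q to (p + 2*q)/(p + q): 3/2, 7/5, 17/12, 41/29, ... converging to sqrt(2)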
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=99, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=9, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__=8, lowerCamelCase__=0.1, lowerCamelCase__=0.002, lowerCamelCase__=1, lowerCamelCase__=0, lowerCamelCase__=0, lowerCamelCase__=None, lowerCamelCase__=None, ):
A : List[Any] = parent
A : List[Any] = batch_size
A : str = encoder_seq_length
A : Dict = decoder_seq_length
# For common tests
A : Tuple = self.decoder_seq_length
A : Optional[int] = is_training
A : List[str] = use_attention_mask
A : Tuple = use_labels
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : Optional[int] = num_hidden_layers
A : List[str] = num_attention_heads
A : str = d_ff
A : int = relative_attention_num_buckets
A : Any = dropout_rate
A : int = initializer_factor
A : Union[str, Any] = eos_token_id
A : Optional[Any] = pad_token_id
A : int = decoder_start_token_id
A : Optional[int] = None
A : Any = decoder_layers
def _lowerCAmelCase ( self ):
return TaConfig.from_pretrained("""google/umt5-base""" )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, ):
if attention_mask is None:
A : Dict = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A : Dict = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=lowerCamelCase__ )
if decoder_head_mask is None:
A : int = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=lowerCamelCase__ )
if cross_attn_head_mask is None:
A : Any = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=lowerCamelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self ):
A : str = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
A : int = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
A : List[Any] = input_ids.clamp(self.pad_token_id + 1 )
A : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
A : Dict = self.get_config()
A : List[Any] = config.num_attention_heads
A : Dict = self.prepare_inputs_dict(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return config, input_dict
def _lowerCAmelCase ( self ):
A , A : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self ):
return TaConfig(
vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def _lowerCAmelCase ( self ):
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : List[Any] = UMTaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Union[str, Any] = model(
input_ids=lowerCamelCase__, decoder_input_ids=lowerCamelCase__, attention_mask=lowerCamelCase__, decoder_attention_mask=lowerCamelCase__, )
A : int = model(input_ids=lowerCamelCase__, decoder_input_ids=lowerCamelCase__ )
A : Any = result.last_hidden_state
A : str = result.past_key_values
A : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase__ ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : str = UMTaModel(config=lowerCamelCase__ ).get_decoder().to(lowerCamelCase__ ).eval()
# first forward pass
A : Tuple = model(lowerCamelCase__, use_cache=lowerCamelCase__ )
A : int = model(lowerCamelCase__ )
A : Optional[int] = model(lowerCamelCase__, use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
A , A : int = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids
A : Union[str, Any] = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids
A : Any = torch.cat([input_ids, next_tokens], dim=-1 )
A : List[Any] = model(lowerCamelCase__ )["""last_hidden_state"""]
A : Optional[Any] = model(lowerCamelCase__, past_key_values=lowerCamelCase__ )["""last_hidden_state"""]
# select random slice
A : List[str] = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Any = output_from_no_past[:, -1, random_slice_idx].detach()
A : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, ):
A : Union[str, Any] = UMTaModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).half().eval()
A : str = model(**lowerCamelCase__ )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(lowerCamelCase__ ).any().item() )
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__lowerCamelCase : List[str] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__lowerCamelCase : List[str] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__lowerCamelCase : str = True
__lowerCamelCase : List[str] = False
__lowerCamelCase : Dict = False
__lowerCamelCase : List[str] = True
__lowerCamelCase : Optional[int] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__lowerCamelCase : Tuple = [0.8, 0.9]
def _lowerCAmelCase ( self ):
A : Optional[int] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _lowerCAmelCase ( self ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
A : Optional[int] = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase__, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=lowerCamelCase__, opset_version=9, input_names=["""input_ids""", """decoder_input_ids"""], )
@unittest.skipIf(torch_device == """cpu""", """Cant do half precision""" )
def _lowerCAmelCase ( self ):
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Tuple = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
A : Tuple = self.model_tester.prepare_config_and_inputs()
A : Dict = config_and_inputs[0]
A : Any = UMTaForConditionalGeneration(lowerCamelCase__ ).eval()
model.to(lowerCamelCase__ )
A : Union[str, Any] = {
"""head_mask""": torch.zeros(config.num_layers, config.num_heads, device=lowerCamelCase__ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers, config.num_heads, device=lowerCamelCase__ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers, config.num_heads, device=lowerCamelCase__ ),
}
for attn_name, (name, mask) in zip(lowerCamelCase__, head_masking.items() ):
A : Dict = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
A : List[Any] = torch.ones(
config.num_decoder_layers, config.num_heads, device=lowerCamelCase__ )
A : Optional[Any] = model.generate(
config_and_inputs[1]["""input_ids"""], num_beams=1, max_length=3, output_attentions=lowerCamelCase__, return_dict_in_generate=lowerCamelCase__, **lowerCamelCase__, )
# We check the state of decoder_attentions and cross_attentions just from the last step
A : Any = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _lowerCAmelCase ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _lowerCAmelCase ( self ):
A : List[str] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""", return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
A : Any = AutoTokenizer.from_pretrained("""google/umt5-small""", use_fast=lowerCamelCase__, legacy=lowerCamelCase__ )
A : Optional[int] = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
A : int = tokenizer(lowerCamelCase__, return_tensors="""pt""", padding=lowerCamelCase__ ).input_ids
# fmt: off
A : Any = torch.tensor(
[
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase__, lowerCamelCase__ )
A : str = model.generate(input_ids.to(lowerCamelCase__ ) )
A : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
A : Optional[int] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
    return _lowerCAmelCase.translate(_lowerCAmelCase.maketrans("""ATCG""" , """TAGC""" ) )
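# Expected behaviour (a minimal sketch of Watson-Crick complementation):
# >>> __UpperCamelCase("ATCG")
# 'TAGC'
# >>> __UpperCamelCase("GTA")
# 'CAT'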
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : int = fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , _lowerCAmelCase ).groups()[0]
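# e.g. __UpperCamelCase("images/beagle_32.jpg") -> "beagle"; filenames are
# assumed to follow the "<label>_<index>.jpg" pattern matched by the regex above.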
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None ):
A : Any = file_names
A : Tuple = image_transform
A : List[Any] = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self, lowerCamelCase__ ):
A : int = self.file_names[idx]
A : str = PIL.Image.open(lowerCamelCase__ )
A : Optional[int] = raw_image.convert("""RGB""" )
if self.image_transform is not None:
A : int = self.image_transform(lowerCamelCase__ )
A : Union[str, Any] = extract_label(lowerCamelCase__ )
if self.label_to_id is not None:
A : str = self.label_to_id[label]
return {"image": image, "label": label}
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
if args.with_tracking:
A : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
A : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : List[Any] = config["""lr"""]
A : List[Any] = int(config["""num_epochs"""] )
A : str = int(config["""seed"""] )
A : Optional[int] = int(config["""batch_size"""] )
A : Optional[int] = config["""image_size"""]
if not isinstance(_lowerCAmelCase , (list, tuple) ):
A : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
A : Any = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A : Optional[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A : Dict = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A : Optional[int] = os.path.split(_lowerCAmelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase )
# Grab all the image filenames
A : Union[str, Any] = [os.path.join(args.data_dir , _lowerCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
A : Tuple = [extract_label(_lowerCAmelCase ) for fname in file_names]
A : Union[str, Any] = list(set(_lowerCAmelCase ) )
id_to_label.sort()
A : Dict = {lbl: i for i, lbl in enumerate(_lowerCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# Split our filenames between train and validation
A : int = np.random.permutation(len(_lowerCAmelCase ) )
A : Optional[Any] = int(0.8 * len(_lowerCAmelCase ) )
A : Optional[Any] = random_perm[:cut]
A : Union[str, Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A : Tuple = Compose([RandomResizedCrop(_lowerCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
A : str = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# For evaluation, we use a deterministic Resize
A : Optional[int] = Compose([Resize(_lowerCAmelCase ), ToTensor()] )
A : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowerCAmelCase , label_to_id=_lowerCAmelCase )
# Instantiate dataloaders.
A : int = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
A : Optional[int] = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : Any = create_model("""resnet50d""" , pretrained=_lowerCAmelCase , num_classes=len(_lowerCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A : Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A : List[str] = False
for param in model.get_classifier().parameters():
A : str = True
# We normalize the batches of images to be a bit faster.
A : Optional[int] = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
A : List[Any] = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A : int = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A : str = OneCycleLR(optimizer=_lowerCAmelCase , max_lr=_lowerCAmelCase , epochs=_lowerCAmelCase , steps_per_epoch=len(_lowerCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : str = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
A : Any = 0
# We also need to keep track of the starting epoch so files are named properly
A : List[Any] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A : int = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A : int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A : Dict = os.path.splitext(_lowerCAmelCase )[0]
if "epoch" in training_difference:
A : Optional[int] = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
A : str = None
else:
A : int = int(training_difference.replace("""step_""" , """""" ) )
A : Dict = resume_step // len(_lowerCAmelCase )
resume_step -= starting_epoch * len(_lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
model.train()
if args.with_tracking:
A : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A : Union[str, Any] = accelerator.skip_first_batches(_lowerCAmelCase , _lowerCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Union[str, Any] = (batch["""image"""] - mean) / std
A : Optional[Any] = model(_lowerCAmelCase )
A : Union[str, Any] = torch.nn.functional.cross_entropy(_lowerCAmelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : Tuple = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A : int = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
model.eval()
A : str = 0
A : Any = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Optional[int] = (batch["""image"""] - mean) / std
with torch.no_grad():
A : List[str] = model(_lowerCAmelCase )
A : Dict = outputs.argmax(dim=-1 )
A , A : str = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
A : List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(_lowerCAmelCase ),
"""epoch""": epoch,
} , step=_lowerCAmelCase , )
if checkpointing_steps == "epoch":
A : List[Any] = f'''epoch_{epoch}'''
if args.output_dir is not None:
A : Optional[Any] = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
if args.with_tracking:
accelerator.end_training()
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
A : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=_lowerCAmelCase , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=_lowerCAmelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=_lowerCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
A : int = parser.parse_args()
A : Any = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
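# Example invocation (script name and paths are placeholders):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 --with_tracking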
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
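# Expected behaviour for the sample words above (note each suffix keeps the
# trailing space produced for the END marker):
# >>> autocomplete_using_trie("de")
# ('depart ', 'detergent ', 'deer ', 'deal ')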
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : List[str] = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
A , A : Dict = get_aligned_output_features_output_indices(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, ["""c"""] )
self.assertEqual(lowerCamelCase__, [2] )
# Out indices set to match out features
A , A : List[Any] = get_aligned_output_features_output_indices(["""a""", """c"""], lowerCamelCase__, lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, ["""a""", """c"""] )
self.assertEqual(lowerCamelCase__, [0, 2] )
# Out features set to match out indices
A , A : Union[str, Any] = get_aligned_output_features_output_indices(lowerCamelCase__, [0, 2], lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, ["""a""", """c"""] )
self.assertEqual(lowerCamelCase__, [0, 2] )
# Out features selected from negative indices
A , A : List[str] = get_aligned_output_features_output_indices(lowerCamelCase__, [-3, -1], lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, ["""a""", """c"""] )
self.assertEqual(lowerCamelCase__, [-3, -1] )
def _lowerCAmelCase ( self ):
# Stage names must be set
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(["""a""", """b"""], (0, 1), lowerCamelCase__ )
# Out features must be a list
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(("""a""", """b"""), (0, 1), ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(["""a""", """b"""], (0, 1), ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(lowerCamelCase__, 0, ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(lowerCamelCase__, (0, 1), ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(["""a""", """b"""], (0,), ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(["""a""", """b"""], (0, 2), ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(lowerCamelCase__ ):
verify_out_features_out_indices(["""b""", """a"""], (0, 1), ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""], (0, 1, -1), ["""a""", """b""", """c""", """d"""] )
def _lowerCAmelCase ( self ):
A : Dict = BackboneMixin()
A : Optional[Any] = ["""a""", """b""", """c"""]
A : Optional[Any] = ["""a""", """c"""]
A : List[str] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ["""a""", """c"""] )
self.assertEqual(backbone.out_indices, [0, 2] )
# Check out features and indices are updated correctly
A : Optional[int] = ["""a""", """b"""]
self.assertEqual(backbone.out_features, ["""a""", """b"""] )
self.assertEqual(backbone.out_indices, [0, 1] )
A : List[str] = [-3, -1]
self.assertEqual(backbone.out_features, ["""a""", """c"""] )
self.assertEqual(backbone.out_indices, [-3, -1] )
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
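# A resolved device_map is a plain dict mapping module names to devices, e.g.
# {"transformer.h.0": 0, "transformer.h.1": "cpu", "lm_head": "disk"}
# (module names here are illustrative only).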
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
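# After this replacement pass, every eligible ``nn.Linear`` outside
# `modules_to_not_convert` is a bitsandbytes 8-bit/4-bit linear module; only
# the leaf layers change, the overall module tree layout is preserved.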
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
        A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase ) -> float:
"""simple docstring"""
    if not isinstance(_lowerCAmelCase , (int, float) ) or _lowerCAmelCase <= 0:
        raise ValueError("""Length must be a positive number.""" )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (_lowerCAmelCase**2)
def __UpperCamelCase ( _lowerCAmelCase ) -> float:
"""simple docstring"""
    if not isinstance(_lowerCAmelCase , (int, float) ) or _lowerCAmelCase <= 0:
        raise ValueError("""Length must be a positive number.""" )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (_lowerCAmelCase**3)
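# For a unit edge the classical closed forms evaluate to, approximately,
# a surface area of 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457 and a volume of
# (15 + 7 * sqrt(5)) / 4 ≈ 7.6631.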
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
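# Typical invocations (subcommand names follow the registrations above; flags
# are omitted here):
#   transformers-cli env
#   transformers-cli convert --help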
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:str = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:int = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
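# With this pattern, importing the package stays cheap: heavy submodules are
# only materialized on first attribute access, e.g. (assuming torch is
# installed) accessing BlenderbotSmallForConditionalGeneration triggers the
# actual `modeling_blenderbot_small` import.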
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
SCREAMING_SNAKE_CASE_:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Dict = TypeVar("""DatasetType""", Dataset, IterableDataset)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"""is an empty dataset dictionary.""" )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_lowerCAmelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.''' )
if i == 0:
A , A : List[Any] = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
else:
return _interleave_iterable_datasets(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , stopping_strategy=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
"""is an empty dataset dictionary.""" )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n'''
                    f'''Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_lowerCAmelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.''' )
if i == 0:
A , A : Optional[int] = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
                f'''Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
else:
return _concatenate_iterable_datasets(_lowerCAmelCase , info=_lowerCAmelCase , split=_lowerCAmelCase , axis=_lowerCAmelCase )
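# Minimal usage sketch (assuming these two functions are the public
# `interleave_datasets` and `concatenate_datasets` of the `datasets` library):
# >>> from datasets import Dataset
# >>> da = Dataset.from_dict({"a": [0, 1, 2]})
# >>> db = Dataset.from_dict({"a": [10, 11, 12]})
# >>> interleave_datasets([da, db])["a"]   # alternates rows by default
# [0, 10, 1, 11, 2, 12]
# >>> concatenate_datasets([da, db])["a"]
# [0, 1, 2, 10, 11, 12]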
| 662 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
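# Note: this greedy strategy is optimal only for canonical coin systems such
# as the Indian denominations below; e.g. for denominations [1, 3, 4] and a
# value of 6 it returns [4, 1, 1] although [3, 3] uses fewer coins.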
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
SCREAMING_SNAKE_CASE_:int = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
SCREAMING_SNAKE_CASE_:str = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __UpperCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
A : Any = []
for i in range(len(_lowerCAmelCase ) ):
A : List[str] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A : Optional[Any] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_lowerCAmelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_lowerCAmelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_lowerCAmelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A : Dict = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_lowerCAmelCase )
return next_generation
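# One step of the vertical blinker defined above yields the horizontal one
# (and vice versa):
# >>> new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]]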
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[Image.Image]:
"""simple docstring"""
A : List[Any] = []
for _ in range(_lowerCAmelCase ):
# Create output image
A : str = Image.new("""RGB""" , (len(cells[0] ), len(_lowerCAmelCase )) )
A : List[Any] = img.load()
# Save cells to image
for x in range(len(_lowerCAmelCase ) ):
for y in range(len(cells[0] ) ):
A : List[str] = 255 - cells[y][x] * 255
A : Union[str, Any] = (colour, colour, colour)
# Save image
images.append(_lowerCAmelCase )
A : Optional[int] = new_generation(_lowerCAmelCase )
return images
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase = 50 ) -> int:
"""simple docstring"""
A : List[Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_:Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE_:str = 250_004
SCREAMING_SNAKE_CASE_:Tuple = 250_020
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = MBartaaTokenizer
__lowerCamelCase : Dict = MBartaaTokenizerFast
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : Dict = True
def _lowerCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A : Union[str, Any] = MBartaaTokenizer(lowerCamelCase__, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : List[Any] = """<s>"""
A : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ), lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<s>""" )
self.assertEqual(vocab_keys[1], """<pad>""" )
self.assertEqual(vocab_keys[-1], """<mask>""" )
self.assertEqual(len(lowerCamelCase__ ), 1054 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 1054 )
def _lowerCAmelCase ( self ):
A : str = MBartaaTokenizer(lowerCamelCase__, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=lowerCamelCase__ )
A : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
A : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""], )
A : str = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
A : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""], )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : int = {"""input_ids""": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""facebook/mbart-large-50""", revision="""d3913889c59cd5c9e456b269c376325eabad57e2""", )
def _lowerCAmelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__, **lowerCamelCase__ )
A : Tuple = self.tokenizer_class.from_pretrained(lowerCamelCase__, **lowerCamelCase__ )
A : List[str] = tempfile.mkdtemp()
A : Tuple = tokenizer_r.save_pretrained(lowerCamelCase__ )
A : Union[str, Any] = tokenizer_p.save_pretrained(lowerCamelCase__ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase__, lowerCamelCase__ )
# Checks everything loads correctly in the same way
A : Optional[Any] = tokenizer_r.from_pretrained(lowerCamelCase__ )
A : List[str] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__, lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
A : Any = tempfile.mkdtemp()
A : Any = tokenizer_r.save_pretrained(lowerCamelCase__, legacy_format=lowerCamelCase__ )
A : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase__ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase__, lowerCamelCase__ )
# Checks everything loads correctly in the same way
A : Dict = tokenizer_r.from_pretrained(lowerCamelCase__ )
A : Union[str, Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__, lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
A : Dict = tempfile.mkdtemp()
A : str = tokenizer_r.save_pretrained(lowerCamelCase__, legacy_format=lowerCamelCase__ )
A : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : Dict = tokenizer_r.from_pretrained(lowerCamelCase__ )
A : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__, lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = "facebook/mbart-large-50-one-to-many-mmt"
__lowerCamelCase : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__lowerCamelCase : Optional[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__lowerCamelCase : Union[str, Any] = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def _lowerCAmelCase ( cls ):
A : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="""en_XX""", tgt_lang="""ro_RO""" )
A : Union[str, Any] = 1
return cls
def _lowerCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""], 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""], 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""], 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""], 25_0038 )
def _lowerCAmelCase ( self ):
A : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.assertIn(lowerCamelCase__, self.tokenizer.all_special_ids )
A : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
A : Union[str, Any] = self.tokenizer.decode(lowerCamelCase__, skip_special_tokens=lowerCamelCase__ )
A : List[str] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0], lowerCamelCase__ )
A : str = 10
A : Optional[Any] = self.tokenizer(lowerCamelCase__, max_length=lowerCamelCase__, truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[0], lowerCamelCase__ )
self.assertEqual(ids[-1], 2 )
self.assertEqual(len(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ), [25_0053, 25_0001] )
def _lowerCAmelCase ( self ):
A : List[Any] = tempfile.mkdtemp()
A : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
A : Tuple = MBartaaTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase__ )
@require_torch
def _lowerCAmelCase ( self ):
A : List[str] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase__, return_tensors="""pt""" )
A : str = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowerCAmelCase ( self ):
A : List[Any] = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCamelCase__, truncation=lowerCamelCase__, max_length=len(self.expected_src_tokens ), return_tensors="""pt""", )
A : List[str] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__, lowerCamelCase__ )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
A : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase__ )
self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.tokenizer(self.src_text, padding=lowerCamelCase__, truncation=lowerCamelCase__, max_length=3, return_tensors="""pt""" )
A : int = self.tokenizer(
text_target=self.tgt_text, padding=lowerCamelCase__, truncation=lowerCamelCase__, max_length=10, return_tensors="""pt""" )
A : int = targets["""input_ids"""]
A : Dict = shift_tokens_right(lowerCamelCase__, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.tokenizer._build_translation_inputs(
"""A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(lowerCamelCase__ ), {
# en_XX, A, test, EOS
"""input_ids""": [[25_0004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_0001,
}, )
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
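        # Build a list with one random RGB image, converting the channels-first array to a channels-last PIL image.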
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_:str = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
SCREAMING_SNAKE_CASE_:Tuple = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
SCREAMING_SNAKE_CASE_:Union[str, Any] = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_:Tuple = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_:List[str] = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for tf_name, hf_name in patterns:
A : Optional[int] = k.replace(_lowerCAmelCase , _lowerCAmelCase )
return k
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
A : Any = BigBirdPegasusConfig(**_lowerCAmelCase )
A : Any = BigBirdPegasusForConditionalGeneration(_lowerCAmelCase )
A : Union[str, Any] = torch_model.state_dict()
A : Tuple = {}
# separating decoder weights
A : Any = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
A : Dict = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        A : Dict = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
A : List[str] = DECODER_PATTERNS
A : List[Any] = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
            # TF stores dense kernels as (in, out); PyTorch expects (out, in), so transpose.
            A : Optional[int] = v.T
A : Tuple = torch.from_numpy(_lowerCAmelCase )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        A : Optional[int] = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
A : str = REMAINING_PATTERNS
A : int = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
            A : Tuple = v.T
A : Dict = torch.from_numpy(_lowerCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
A : int = mapping["""model.embed_positions.weight"""]
A : str = mapping.pop("""model.embed_positions.weight""" )
A , A : Tuple = torch_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
A : str = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : Any = tf.train.list_variables(_lowerCAmelCase )
A : int = {}
A : Union[str, Any] = ["""global_step"""]
for name, shape in tqdm(_lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
A : Any = any(pat in name for pat in ignore_name )
if skip_key:
continue
A : Optional[int] = tf.train.load_variable(_lowerCAmelCase , _lowerCAmelCase )
A : int = array
return tf_weights
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
A : Any = get_tf_weights_as_numpy(_lowerCAmelCase )
A : Union[str, Any] = convert_bigbird_pegasus(_lowerCAmelCase , _lowerCAmelCase )
torch_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE_:Any = parser.parse_args()
SCREAMING_SNAKE_CASE_:Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
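        # Load a reference latent tensor saved as .npy on the Hub, cast to bfloat16 when fp16 mode is requested, else float32.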
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
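            # each case: [seed, timestep, expected output slice]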
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : List[str] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self, **lowerCamelCase__ ):
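        # Remap legacy "no_*" flags to their positive counterparts, warning on use.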
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A : List[str] = deprecated_arg[3:]
A : List[str] = not kwargs.pop(lowerCamelCase__ )
logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
A : Optional[Any] = kwargs.pop("""tpu_name""", self.tpu_name )
A : Tuple = kwargs.pop("""device_idx""", self.device_idx )
A : Union[str, Any] = kwargs.pop("""eager_mode""", self.eager_mode )
A : Tuple = kwargs.pop("""use_xla""", self.use_xla )
super().__init__(**lowerCamelCase__ )
__lowerCamelCase : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Name of TPU"} , )
__lowerCamelCase : int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Benchmark models in eager mode."} )
__lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
A : List[str] = None
if self.tpu:
try:
if self.tpu_name:
A : str = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A : Union[str, Any] = None
return tpu
@cached_property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A : Optional[int] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx], """GPU""" )
A : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([], """GPU""" ) # disable GPU
A : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
return self._setup_tpu is not None
@property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
return self._setup_strategy
@property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _lowerCAmelCase ( self ):
requires_backends(self, ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _lowerCAmelCase ( self ):
return self.n_gpu > 0
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
import torch
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
if torch.cuda.is_available():
A : str = torch.cuda.device_count()
else:
A : Optional[Any] = 0
print(f'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 662 |
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
A , A : int = np.shape(_lowerCAmelCase )
if rows != columns:
A : Union[str, Any] = (
"""'table' has to be of square shaped array but got a """
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCAmelCase )
A : Union[str, Any] = np.zeros((rows, columns) )
A : Dict = np.zeros((rows, columns) )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A : Any = (table[i][j] - total) / upper[j][j]
A : Union[str, Any] = 1
for j in range(_lowerCAmelCase , _lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
A : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
SCREAMING_SNAKE_CASE_:Dict = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
SCREAMING_SNAKE_CASE_:int = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
SCREAMING_SNAKE_CASE_:Tuple = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""", id="""token""" ), id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""", id="""token""" ), id="""sequence""" ), id="""references""" ),
} ), )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 1, lowerCamelCase__ = 4, ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCamelCase__, hypotheses=lowerCamelCase__, min_len=lowerCamelCase__, max_len=lowerCamelCase__ )
}
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ):
A : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
A : Any = math.ceil(val / multiple ) * multiple
return x
A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
A , A : List[Any] = get_image_size(_lowerCAmelCase )
A , A : List[Any] = output_size
# determine new height and width
A : Optional[int] = output_height / input_height
A : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A : Any = scale_width
else:
# fit height
A : int = scale_height
A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 | 1 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Union[str, Any] = list[tuple[int, int]]
SCREAMING_SNAKE_CASE_:Any = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE_:Optional[int] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Tuple = pos_x
A : Optional[int] = pos_y
A : Union[str, Any] = (pos_y, pos_x)
A : Optional[int] = goal_x
A : Tuple = goal_y
A : Dict = g_cost
A : Any = parent
A : str = self.calculate_heuristic()
def _lowerCAmelCase ( self ):
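        # Manhattan distance to the goal; greedy best-first orders the open list by this heuristic alone.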
A : Dict = abs(self.pos_x - self.goal_x )
A : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self, lowerCamelCase__ ):
return self.f_cost < other.f_cost
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = Node(start[1], start[0], goal[1], goal[0], 0, lowerCamelCase__ )
A : Dict = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowerCamelCase__ )
A : List[str] = [self.start]
A : list[Node] = []
A : int = False
def _lowerCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A : int = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
A : Tuple = True
return self.retrace_path(lowerCamelCase__ )
self.closed_nodes.append(lowerCamelCase__ )
A : Optional[Any] = self.get_successors(lowerCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase__ )
else:
# retrieve the best current path
A : str = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase__ )
else:
self.open_nodes.append(lowerCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[int] = []
for action in delta:
A : List[str] = parent.pos_x + action[1]
A : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase__, lowerCamelCase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowerCamelCase__, ) )
return successors
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = node
A : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A : Union[str, Any] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Tuple = (0, 0)
SCREAMING_SNAKE_CASE_:Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
SCREAMING_SNAKE_CASE_:Optional[Any] = GreedyBestFirst(init, goal)
SCREAMING_SNAKE_CASE_:Any = greedy_bf.search()
if path:
for pos_x, pos_y in path:
SCREAMING_SNAKE_CASE_:Optional[int] = 2
for elem in grid:
print(elem)
| 662 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
        # the input is a comma-separated string, so split it into a list of number strings
A : List[Any] = arr.split(""",""" )
def _lowerCAmelCase ( self ):
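        # Kadane-style dynamic programming: sum_value[i] is the best subarray sum ending at i; rear[i] tracks the best seen so far.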
A : int = [int(self.array[0] )] * len(self.array )
A : Optional[Any] = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
A : Union[str, Any] = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
A : Dict = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array()
print(("""the results is:""", re))
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_:str = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Dict = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Dict = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
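# Design note (illustrative, assuming this is the package's __init__): _LazyModule
# replaces the module in sys.modules, so the heavy torch/TF imports above only run
# when an attribute such as BlipModel is first accessed.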
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
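# Usage sketch (illustrative, hypothetical values): BitConfig(layer_type="bottleneck",
# global_padding="same") upper-cases the padding to "SAME", while an unknown
# layer_type or padding strategy raises ValueError in the checks above.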
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = False ) -> str:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : Optional[Any] = f'''Expected string as input, found {type(_lowerCAmelCase )}'''
raise ValueError(_lowerCAmelCase )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : Union[str, Any] = f'''Expected boolean as use_pascal parameter, found {type(_lowerCAmelCase )}'''
raise ValueError(_lowerCAmelCase )
A : Optional[Any] = input_str.split("""_""" )
A : Tuple = 0 if use_pascal else 1
A : Union[str, Any] = words[start_index:]
A : Tuple = [word[0].upper() + word[1:] for word in words_to_capitalize]
A : Tuple = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids with them
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append the new tokens to input_ids and the attention mask
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_:Optional[int] = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=8 ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
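# Worked example (illustrative): with the default scale_factor of 8, a requested
# 768x768 image maps to a 96x96 latent grid (768 // 8**2 = 12, times 8 again); any
# dimension not divisible by 64 is rounded up by one latent step first.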
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
super().__init__()
self.register_modules(
unet=lowerCamelCase__, scheduler=lowerCamelCase__, movq=lowerCamelCase__, )
A : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if latents is None:
A : int = randn_tensor(lowerCamelCase__, generator=lowerCamelCase__, device=lowerCamelCase__, dtype=lowerCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A : Optional[Any] = latents.to(lowerCamelCase__ )
A : Optional[int] = latents * scheduler.init_noise_sigma
return latents
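# Note (illustrative): freshly drawn latents are multiplied by scheduler.init_noise_sigma
# so the first denoising step sees noise at the scale the scheduler expects.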
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A : Optional[int] = torch.device(f'''cuda:{gpu_id}''' )
A : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
A : Optional[int] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""", silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
A , A : Union[str, Any] = cpu_offload_with_hook(lowerCamelCase__, lowerCamelCase__, prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
A : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self ):
if not hasattr(self.unet, """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__, """_hf_hook""" )
and hasattr(module._hf_hook, """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 512, lowerCamelCase__ = 512, lowerCamelCase__ = 100, lowerCamelCase__ = 4.0, lowerCamelCase__ = 1, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "pil", lowerCamelCase__ = True, ):
A : Optional[Any] = self._execution_device
A : List[str] = guidance_scale > 1.0
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Optional[Any] = torch.cat(lowerCamelCase__, dim=0 )
A : str = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : List[str] = torch.cat(lowerCamelCase__, dim=0 )
if do_classifier_free_guidance:
A : Optional[Any] = image_embeds.repeat_interleave(lowerCamelCase__, dim=0 )
A : List[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase__, dim=0 )
A : Dict = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=lowerCamelCase__ )
self.scheduler.set_timesteps(lowerCamelCase__, device=lowerCamelCase__ )
A : Tuple = self.scheduler.timesteps
A : Any = self.unet.config.in_channels
A , A : int = downscale_height_and_width(lowerCamelCase__, lowerCamelCase__, self.movq_scale_factor )
# create initial latent
A : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width), image_embeds.dtype, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, self.scheduler, )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
A : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A : int = {"""image_embeds""": image_embeds}
A : Tuple = self.unet(
sample=lowerCamelCase__, timestep=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, added_cond_kwargs=lowerCamelCase__, return_dict=lowerCamelCase__, )[0]
if do_classifier_free_guidance:
A , A : List[Any] = noise_pred.split(latents.shape[1], dim=1 )
A , A : Optional[Any] = noise_pred.chunk(2 )
A , A : Optional[Any] = variance_pred.chunk(2 )
A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A : Dict = torch.cat([noise_pred, variance_pred_text], dim=1 )
if not (
hasattr(self.scheduler.config, """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A , A : Optional[Any] = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A : Dict = self.scheduler.step(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, generator=lowerCamelCase__, )[0]
# post-processing
A : List[str] = self.movq.decode(lowerCamelCase__, force_not_quantize=lowerCamelCase__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
A : int = image * 0.5 + 0.5
A : List[str] = image.clamp(0, 1 )
A : Any = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
A : Dict = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
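# Worked example for the branch above (illustrative): size={"shortest_edge": 224} with
# crop_pct=0.875 first resizes the shortest edge to int(224 / 0.875) = 256, then
# center-crops to (224, 224); at 384 and above the image is warped directly.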
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 662 | 1 |
SCREAMING_SNAKE_CASE_:List[str] = {str(digit): digit**5 for digit in range(10)}
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_lowerCAmelCase ) )
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(number) )
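# Sanity check (illustrative): 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it equals its own
# digit-fifth-power sum and is counted by the solution above.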
if __name__ == "__main__":
print(solution())
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
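# Note (illustrative): the fused qkv weight has shape (3 * hidden_size, hidden_size);
# the three row slices above peel off the query, key and value projections in that
# order. BEiT keeps no trainable key bias, which is why only q_bias and v_bias are popped.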
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = PriorTransformer
__lowerCamelCase : Any = "hidden_states"
@property
def _lowerCAmelCase ( self ):
A : List[Any] = 4
A : Union[str, Any] = 8
A : List[str] = 7
A : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : Tuple = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
torch.manual_seed(lowerCamelCase__ )
A : str = 4
A : Optional[int] = 8
A : List[str] = 7
A : List[Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : int = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : Union[str, Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCAmelCase ( self ):
return (4, 8)
@property
def _lowerCAmelCase ( self ):
return (4, 8)
def _lowerCAmelCase ( self ):
A : Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
A : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ):
A , A : int = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""", output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ), 0 )
model.to(lowerCamelCase__ )
A : Optional[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCAmelCase ( self ):
A , A : int = self.prepare_init_args_and_inputs_for_common()
A : Any = self.model_class(**lowerCamelCase__ )
A : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Union[str, Any] = [*signature.parameters.keys()]
A : Optional[Any] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
A : Any = model.to(lowerCamelCase__ )
if hasattr(lowerCamelCase__, """set_default_attn_processor""" ):
model.set_default_attn_processor()
A : Union[str, Any] = self.get_dummy_seed_input()
with torch.no_grad():
A : Union[str, Any] = model(**lowerCamelCase__ )[0]
A : Dict = output[0, :5].flatten().cpu()
print(lowerCamelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
A : List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(lowerCamelCase__, lowerCamelCase__, rtol=1e-2 ) )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__=1, lowerCamelCase__=768, lowerCamelCase__=77, lowerCamelCase__=0 ):
torch.manual_seed(lowerCamelCase__ )
A : List[str] = batch_size
A : str = embedding_dim
A : Any = num_embeddings
A : Dict = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase__ )
A : Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : int = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""", subfolder="""prior""" )
model.to(lowerCamelCase__ )
A : List[Any] = self.get_dummy_seed_input(seed=lowerCamelCase__ )
with torch.no_grad():
A : Optional[int] = model(**lowerCamelCase__ )[0]
assert list(sample.shape ) == [1, 768]
A : Union[str, Any] = sample[0, :8].flatten().cpu()
print(lowerCamelCase__ )
A : int = torch.tensor(lowerCamelCase__ )
assert torch_all_close(lowerCamelCase__, lowerCamelCase__, atol=1e-3 )
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> bool:
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> bool:
"""simple docstring"""
if curr_ind == len(_lowerCAmelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(_lowerCAmelCase ) ):
if valid_connection(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# Insert current vertex into path as next transition
A : List[Any] = next_ver
# Validate created path
if util_hamilton_cycle(_lowerCAmelCase , _lowerCAmelCase , curr_ind + 1 ):
return True
# Backtrack
A : Tuple = -1
return False
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = 0 ) -> list[int]:
"""simple docstring"""
A : List[str] = [-1] * (len(_lowerCAmelCase ) + 1)
# initialize start and end of path with starting index
A : Dict = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(_lowerCAmelCase , _lowerCAmelCase , 1 ) else []
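# Illustrative run (hypothetical input; the public names follow the original module,
# not the obfuscated defs above): a 4-cycle 0-1-2-3-0 admits a Hamiltonian cycle.
# graph = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
# hamilton_cycle(graph) -> [0, 1, 2, 3, 0]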
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
| 662 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = None
SCREAMING_SNAKE_CASE_:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_:Any = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE_:Tuple = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
SCREAMING_SNAKE_CASE_:List[Any] = """▁"""
# Segments (not really needed)
SCREAMING_SNAKE_CASE_:Dict = 0
SCREAMING_SNAKE_CASE_:Any = 1
SCREAMING_SNAKE_CASE_:Optional[Any] = 2
SCREAMING_SNAKE_CASE_:Dict = 3
SCREAMING_SNAKE_CASE_:str = 4
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[Any] = "left"
__lowerCamelCase : Any = XLNetTokenizer
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__="<unk>", lowerCamelCase__="<sep>", lowerCamelCase__="<pad>", lowerCamelCase__="<cls>", lowerCamelCase__="<mask>", lowerCamelCase__=["<eop>", "<eod>"], **lowerCamelCase__, ):
# Mask token behave like a normal word, i.e. include the space before it
A : Optional[int] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCamelCase__, tokenizer_file=lowerCamelCase__, do_lower_case=lowerCamelCase__, remove_space=lowerCamelCase__, keep_accents=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, unk_token=lowerCamelCase__, sep_token=lowerCamelCase__, pad_token=lowerCamelCase__, cls_token=lowerCamelCase__, mask_token=lowerCamelCase__, additional_special_tokens=lowerCamelCase__, **lowerCamelCase__, )
A : Any = 3
A : Optional[Any] = do_lower_case
A : Dict = remove_space
A : Any = keep_accents
A : str = vocab_file
A : Any = False if not self.vocab_file else True
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Tuple = [self.sep_token_id]
A : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
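# Layout note (illustrative): unlike BERT, XLNet appends its special tokens, so a
# single sequence is encoded as `A <sep> <cls>` and a pair as `A <sep> B <sep> <cls>`.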
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[Any] = [self.sep_token_id]
A : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A : Optional[int] = os.path.join(
lowerCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file, lowerCamelCase__ )
return (out_vocab_file,)
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
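# Illustrative check (using the original name `solution`): the eighth expansion of the
# continued fraction, 1393/985, is the first whose numerator has more digits than its
# denominator, so solution(8) == 1.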
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = XLMTokenizer
__lowerCamelCase : Union[str, Any] = False
def _lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A : Optional[int] = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file, """w""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = """lower newer"""
A : Any = """lower newer"""
return input_text, output_text
def _lowerCAmelCase ( self ):
A : Tuple = XLMTokenizer(self.vocab_file, self.merges_file )
A : int = """lower"""
A : Union[str, Any] = ["""low""", """er</w>"""]
A : List[str] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = tokens + ["""<unk>"""]
A : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : List[str] = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
A : Tuple = tokenizer.encode("""sequence builders""", add_special_tokens=lowerCamelCase__ )
A : Any = tokenizer.encode("""multi-sequence build""", add_special_tokens=lowerCamelCase__ )
A : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__, lowerCamelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : List[str] = """"""
A : Optional[int] = """"""
A : Tuple = []
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A : List[Any] = self.__min_dist_top_down_dp(m - 1, n - 1 )
else:
A : Union[str, Any] = self.__min_dist_top_down_dp(lowerCamelCase__, n - 1 )
A : Optional[int] = self.__min_dist_top_down_dp(m - 1, lowerCamelCase__ )
A : Any = self.__min_dist_top_down_dp(m - 1, n - 1 )
A : Any = 1 + min(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return self.dp[m][n]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : str = worda
A : str = worda
A : Optional[int] = [[-1 for _ in range(len(lowerCamelCase__ ) )] for _ in range(len(lowerCamelCase__ ) )]
return self.__min_dist_top_down_dp(len(lowerCamelCase__ ) - 1, len(lowerCamelCase__ ) - 1 )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = worda
A : Optional[Any] = worda
A : List[Any] = len(lowerCamelCase__ )
A : List[str] = len(lowerCamelCase__ )
A : Any = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A : Any = j
elif j == 0: # second string is empty
A : Any = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A : List[Any] = self.dp[i - 1][j - 1]
else:
A : List[Any] = self.dp[i][j - 1]
A : int = self.dp[i - 1][j]
A : Dict = self.dp[i - 1][j - 1]
A : Dict = 1 + min(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return self.dp[m][n]
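# Worked example (standard Levenshtein distance): turning "kitten" into "sitting"
# takes 3 edits (substitute k->s, substitute e->i, insert g), so both the memoized
# top-down recursion and the bottom-up table above should return 3 for that pair.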
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
SCREAMING_SNAKE_CASE_:str = input("""Enter the first string: """).strip()
SCREAMING_SNAKE_CASE_:Dict = input("""Enter the second string: """).strip()
print()
print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = LongformerTokenizer
__lowerCamelCase : int = True
__lowerCamelCase : Optional[int] = LongformerTokenizerFast
__lowerCamelCase : Tuple = True
def _lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : Any = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A : List[Any] = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A : Dict = {"""unk_token""": """<unk>"""}
A : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase__ ) )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[int] = """lower newer"""
A : str = """lower newer"""
return input_text, output_text
def _lowerCAmelCase ( self ):
A : str = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
A : Dict = """lower newer"""
A : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A : List[Any] = tokenizer.tokenize(lowerCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : Optional[Any] = tokens + [tokenizer.unk_token]
A : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""", add_special_tokens=lowerCamelCase__ ), [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""", add_special_tokens=lowerCamelCase__ ), [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2], )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
A : Optional[Any] = tokenizer.encode("""sequence builders""", add_special_tokens=lowerCamelCase__ )
A : List[str] = tokenizer.encode("""multi-sequence build""", add_special_tokens=lowerCamelCase__ )
A : Tuple = tokenizer.encode(
"""sequence builders""", add_special_tokens=lowerCamelCase__, add_prefix_space=lowerCamelCase__ )
A : Optional[Any] = tokenizer.encode(
"""sequence builders""", """multi-sequence build""", add_special_tokens=lowerCamelCase__, add_prefix_space=lowerCamelCase__ )
A : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__, lowerCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.get_tokenizer()
A : Tuple = """Encode this sequence."""
A : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
A : Dict = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__, add_prefix_space=lowerCamelCase__ )
A : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase__, lowerCamelCase__ )
A : int = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__, add_prefix_space=lowerCamelCase__ )
A : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
A : Any = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
A : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase__, lowerCamelCase__ )
# Testing spaces after special tokens
A : str = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ )} ) # mask token has a left space
A : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
A : int = """Encode <mask> sequence"""
A : Any = """Encode <mask>sequence"""
A : Union[str, Any] = tokenizer.encode(lowerCamelCase__ )
A : Any = encoded.index(lowerCamelCase__ )
A : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = tokenizer.encode(lowerCamelCase__ )
A : Dict = encoded.index(lowerCamelCase__ )
A : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__, **lowerCamelCase__ )
A : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__, **lowerCamelCase__ )
A : Dict = """A, <mask> AllenNLP sentence."""
A : Tuple = tokenizer_r.encode_plus(lowerCamelCase__, add_special_tokens=lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
A : Optional[Any] = tokenizer_p.encode_plus(lowerCamelCase__, add_special_tokens=lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ), sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ), sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ), )
A : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCamelCase__, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowerCAmelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
A : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""], lowerCamelCase__ )
self.assertEqual(post_processor_state["""add_prefix_space"""], lowerCamelCase__ )
self.assertEqual(post_processor_state["""trim_offsets"""], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : Tuple = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A : Union[str, Any] = f'''{text_of_1_token} {text_of_1_token}'''
A : Tuple = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : Union[str, Any] = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : Any = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : int = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : Dict = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : Tuple = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : Tuple = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : List[Any] = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
A : str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__, use_fast=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__ )
A : List[str] = tokenizer_r(lowerCamelCase__, return_offsets_mapping=lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )), )
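# Summary of the offset cases above: with trim_offsets=True each token's span
# excludes the leading space (the second span starts at len + 1); with
# trim_offsets=False the span absorbs the space (it starts at len). The
# commented-out block records the remaining add_prefix_space=True,
# trim_offsets=True case for input that already starts with a space.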
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
            _lowerCAmelCase ,
            _lowerCAmelCase ,
            _lowerCAmelCase ,
            dtype=bnb_quantization_config.torch_dtype ,
            offload_folder=_lowerCAmelCase ,
            offload_state_dict=_lowerCAmelCase ,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,
            offload_abit_bnb=load_in_abit and offload ,
        )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
                        module.in_features ,
                        module.out_features ,
                        module.bias is not None ,
                        has_fpaa_weights=_lowerCAmelCase ,
                        threshold=bnb_quantization_config.llm_inta_threshold ,
                    )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
                        module.in_features ,
                        module.out_features ,
                        module.bias is not None ,
                        bnb_quantization_config.bnb_abit_compute_dtype ,
                        compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,
                        quant_type=bnb_quantization_config.bnb_abit_quant_type ,
                    )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
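# Minimal end-to-end sketch (assumption: the helpers above mirror accelerate's bnb
# quantization utilities; the public names below come from accelerate's documented
# API, not from the obfuscated functions in this file):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/checkpoint_folder",  # placeholder path
#       device_map="auto",
#   )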
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_:str = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""LayoutLMv3FeatureExtractor"""]
SCREAMING_SNAKE_CASE_:List[Any] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
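# Pattern note: _LazyModule defers every submodule import until attribute access,
# so importing this package stays cheap; accessing e.g. LayoutLMv3Model only then
# triggers the real import. When an optional backend is missing, the except
# branches above simply leave the corresponding names out of _import_structure.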
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
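# Contract sketch (assumption: each command registered above follows transformers'
# BaseTransformersCLICommand shape):
#   class MyCommand(BaseTransformersCLICommand):   # hypothetical example
#       @staticmethod
#       def register_subcommand(parser): ...       # adds its own sub-parser and
#                                                  # sets func to a factory
#       def run(self): ...                         # invoked via args.func(args).run()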
if __name__ == "__main__":
main()
| 662 | 1 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Any = ["torch", "scipy"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""torch""", """scipy"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """scipy"""] )
@classmethod
def _lowerCAmelCase ( cls, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(cls, ["""torch""", """scipy"""] )
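# Pattern note: with DummyObject as metaclass, importing this placeholder always
# succeeds; instantiating it or calling one of the classmethods then raises an
# ImportError via requires_backends, naming the missing backends ("torch",
# "scipy") instead of failing at import time.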
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
SCREAMING_SNAKE_CASE_:Tuple = 2
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, *, # begin keyword-only arguments
lowerCamelCase__="<s>", lowerCamelCase__="<pad>", lowerCamelCase__="</s>", lowerCamelCase__="<unk>", lowerCamelCase__=None, ):
A , A , A , A : Tuple = bos, unk, pad, eos
A : Any = []
A : int = []
A : Union[str, Any] = {}
A : List[str] = self.add_symbol(lowerCamelCase__ )
A : Optional[int] = self.add_symbol(lowerCamelCase__ )
A : List[str] = self.add_symbol(lowerCamelCase__ )
A : int = self.add_symbol(lowerCamelCase__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowerCamelCase__ )
A : Any = len(self.symbols )
def __eq__( self, lowerCamelCase__ ):
return self.indices == other.indices
def __getitem__( self, lowerCamelCase__ ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
return len(self.symbols )
def __contains__( self, lowerCamelCase__ ):
return sym in self.indices
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
A : Tuple = cls()
d.add_from_file(lowerCamelCase__ )
return d
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=1, lowerCamelCase__=False ):
if word in self.indices and not overwrite:
A : str = self.indices[word]
A : List[Any] = self.count[idx] + n
return idx
else:
A : int = len(self.symbols )
A : Optional[Any] = idx
self.symbols.append(lowerCamelCase__ )
self.count.append(lowerCamelCase__ )
return idx
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return 0
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
try:
with open(lowerCamelCase__, """r""", encoding="""utf-8""" ) as fd:
self.add_from_file(lowerCamelCase__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(lowerCamelCase__ ) )
return
A : List[str] = f.readlines()
A : Optional[int] = self._load_meta(lowerCamelCase__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[Any] = line.rstrip().rsplit(""" """, 1 )
if field == "#fairseq:overwrite":
A : int = True
A , A : int = line.rsplit(""" """, 1 )
else:
A : int = False
A : List[Any] = int(lowerCamelCase__ )
A : List[str] = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(lowerCamelCase__ ) )
self.add_symbol(lowerCamelCase__, n=lowerCamelCase__, overwrite=lowerCamelCase__ )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = dict((re.sub(R"""@@$""" , """""" , _lowerCAmelCase ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , _lowerCAmelCase ), v) for k, v in d.items() )
A : Any = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
A : Optional[Any] = d[k] # restore
return da
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
if not os.path.exists(_lowerCAmelCase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
A : Union[str, Any] = os.path.join(_lowerCAmelCase , """checkpoint.pt""" )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
A : Optional[int] = torch.load(_lowerCAmelCase , map_location="""cpu""" )
A : Optional[Any] = chkpt["""cfg"""]["""model"""]
# dicts
A : List[Any] = os.path.join(_lowerCAmelCase , """dict.txt""" )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
A : Optional[Any] = Dictionary.load(_lowerCAmelCase )
A : Optional[Any] = rewrite_dict_keys(src_dict.indices )
A : List[Any] = len(_lowerCAmelCase )
A : Optional[int] = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowerCAmelCase , ensure_ascii=_lowerCAmelCase , indent=_lowerCAmelCase ) )
# merges_file (bpecodes)
A : Union[str, Any] = os.path.join(_lowerCAmelCase , """bpecodes""" )
if not os.path.isfile(_lowerCAmelCase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
A : Union[str, Any] = os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
# model config
A : Optional[int] = os.path.join(_lowerCAmelCase , """config.json""" )
A : str = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowerCAmelCase , ensure_ascii=_lowerCAmelCase , indent=_lowerCAmelCase ) )
# tokenizer config
A : Optional[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
A : Any = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(_lowerCAmelCase , ensure_ascii=_lowerCAmelCase , indent=_lowerCAmelCase ) )
# model
A : Optional[Any] = chkpt["""model"""]
# remove unneeded keys
A : int = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
A : Tuple = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
A : str = model_state_dict.pop(_lowerCAmelCase )
else:
A : str = model_state_dict.pop(_lowerCAmelCase )
A : Dict = BioGptConfig.from_pretrained(_lowerCAmelCase )
A : Optional[int] = BioGptForCausalLM(_lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCAmelCase )
# save
A : Optional[int] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE_:Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
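# Invocation sketch (the script file name and paths below are placeholders):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/fairseq_dump \
#       --pytorch_dump_folder_path /path/to/hf_output
# The checkpoint folder is expected to contain checkpoint.pt, dict.txt and
# bpecodes, per the existence checks in the conversion function above.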
| 662 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
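# Worked example and caveat: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500,
# 2_000], "987") returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. The greedy
# scan is optimal for canonical coin systems like this one, but not in general:
# with denominations [1, 3, 4] and value 6 it yields [4, 1, 1] even though [3, 3]
# uses fewer coins.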
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 1 |
import argparse
import os
import re
SCREAMING_SNAKE_CASE_:List[str] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE_:List[str] = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE_:str = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = False ) -> str:
"""simple docstring"""
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
A : str = f.read()
A : Union[str, Any] = content.split("""\n""" )
A : Union[str, Any] = []
A : Dict = 0
while line_idx < len(_lowerCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
A : Union[str, Any] = len(re.search(R"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
A : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
A : List[str] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
A : List[str] = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : _re_identifier.search(_lowerCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(_lowerCAmelCase ) )
elif "\n".join(_lowerCAmelCase ) != content:
return True
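# Behaviour sketch: given a block such as
#   MODEL_MAPPING_NAMES = OrderedDict(
#       [
#           ("bert", "BertModel"),
#           ("albert", "AlbertModel"),
#       ]
#   )
# sort_auto_mapping reorders the entries alphabetically by the quoted identifier
# ("albert" before "bert") and either rewrites the file (overwrite=True) or just
# reports that a change is needed by returning True.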
def __UpperCamelCase ( _lowerCAmelCase = False ) -> List[Any]:
"""simple docstring"""
A : Tuple = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for f in os.listdir(_lowerCAmelCase ) if f.endswith(""".py""" )]
A : Optional[Any] = [sort_auto_mapping(_lowerCAmelCase , overwrite=_lowerCAmelCase ) for fname in fnames]
if not overwrite and any(_lowerCAmelCase ):
A : Dict = [f for f, d in zip(_lowerCAmelCase , _lowerCAmelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCAmelCase )}. Run `make style` to fix'''
""" this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
SCREAMING_SNAKE_CASE_:List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# embedding dimensions to the minimum while keeping the full vocab + merges files, which still
# leads to ~3MB in total for all files. The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
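# Note: .half() above stores fp16 weights purely to shrink the saved checkpoint;
# code that later loads this model on CPU typically casts back with .float(),
# since many CPU kernels lack fp16 support.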
| 662 | 1 |
import json
import sys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
A : Tuple = json.load(_lowerCAmelCase )
A : List[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(_lowerCAmelCase ):
A : Optional[int] = results[benchmark_name]
A : Optional[Any] = benchmark_name.split("""/""" )[-1]
output_md.append(f'''### Benchmark: {benchmark_file_name}''' )
A : str = """| metric |"""
A : List[Any] = """|--------|"""
A : Optional[Any] = """| new / old (diff) |"""
for metric_name in sorted(_lowerCAmelCase ):
A : Any = benchmark_res[metric_name]
A : int = metric_vals["""new"""]
A : str = metric_vals.get("""old""" , _lowerCAmelCase )
A : Dict = metric_vals.get("""diff""" , _lowerCAmelCase )
A : Any = f''' {new_val:f}''' if isinstance(_lowerCAmelCase , (int, float) ) else """None"""
if old_val is not None:
val_str += f''' / {old_val:f}''' if isinstance(_lowerCAmelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += f''' ({dif_val:f})''' if isinstance(_lowerCAmelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(_lowerCAmelCase ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = sys.argv[1]
SCREAMING_SNAKE_CASE_:int = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
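# Invocation sketch (file names are placeholders):
#   python format_json_to_md.py benchmark_results.json benchmark_report.md
# which emits one "### Benchmark: <name>" section per entry, with each metric cell
# rendered as "new / old (diff)" inside a collapsible <details> block.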
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(_lowerCAmelCase ).parent ) ,
        checkpoint_file=Path(_lowerCAmelCase ).name ,
        _name="""xmod_base""" ,
        arch="""xmod_base""" ,
        task="""multilingual_masked_lm""" ,
        data_name_or_path=str(_lowerCAmelCase ) ,
        bpe="""sentencepiece""" ,
        sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) ,
        src_dict=str(data_dir / """dict.txt""" ) ,
    )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=xmod.cfg.model.encoder_embed_dim ,
        num_hidden_layers=xmod.cfg.model.encoder_layers ,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads ,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=514 ,
        type_vocab_size=1 ,
        layer_norm_eps=1e-5 ,
        pre_norm=xmod.cfg.model.encoder_normalize_before ,
        adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) ,
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter ,
        languages=xmod.cfg.model.languages ,
    )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fc1.weight
A : Optional[int] = xmod_layer.fc1.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fc2.weight
A : int = xmod_layer.fc2.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fc1.weight
A : Optional[Any] = from_adapter.fc1.bias
A : List[str] = from_adapter.fc2.weight
A : Any = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
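# Example invocation (the flags are the ones declared above; the script filename,
# checkpoint path and output directory are hypothetical):
#
#   python convert_xmod_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path ./hf-xmod-base \
#       --classification_head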
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
A : Optional[int] = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def __UpperCamelCase ( _lowerCAmelCase = 1777 , _lowerCAmelCase = 1855 , _lowerCAmelCase = 8 ) -> int:
"""simple docstring"""
A : Union[str, Any] = base
for _ in range(1 , height ):
A : Union[str, Any] = _modexpt(base , result , 10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
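# Hand-checked example of the square-and-multiply recursion above (values chosen
# for illustration, not taken from the original problem): 3**13 = 1594323, so the
# expected residue mod 1000 is 323. Python's built-in three-argument pow()
# implements the same recurrence and serves as a ground truth for _modexpt.
assert pow(3 , 13 , 1000 ) == 323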
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : List[str] = tempfile.mkdtemp()
A : Any = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
A : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A : List[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
A : List[Any] = os.path.join(self.tmpdirname, lowerCamelCase__ )
with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Optional[Any] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
A : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : List[str] = self.get_tokenizer()
A : Any = self.get_rust_tokenizer()
A : Any = self.get_image_processor()
A : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Dict = self.get_tokenizer(cls_token="""(CLS)""", sep_token="""(SEP)""" )
A : List[str] = self.get_image_processor(do_normalize=lowerCamelCase__ )
A : List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token="""(CLS)""", sep_token="""(SEP)""", do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : int = self.get_tokenizer()
A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : str = self.prepare_image_inputs()
A : List[str] = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : Dict = self.get_image_processor()
A : Tuple = self.get_tokenizer()
A : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = """Alexandra,T-shirt的价格是15便士。"""
A : Any = processor(text=lowerCamelCase__ )
A : Optional[int] = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """Alexandra,T-shirt的价格是15便士。"""
A : Union[str, Any] = self.prepare_image_inputs()
A : List[str] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : List[Any] = self.get_tokenizer()
A : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : int = processor.batch_decode(lowerCamelCase__ )
A : Any = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = self.get_image_processor()
A : List[Any] = self.get_tokenizer()
A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """Alexandra,T-shirt的价格是15便士。"""
A : Dict = self.prepare_image_inputs()
A : Optional[Any] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloat16 if fpaa else jnp.float32
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloat16 if fpaa else jnp.float32
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNet2DConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloat16 if fpaa else jnp.float32
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.int32 ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.int32 ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = "ssube/stable-diffusion-x4-upscaler-onnx"
def _lowerCAmelCase ( self, lowerCamelCase__=0 ):
A : List[str] = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCamelCase__ ) )
A : Dict = torch.manual_seed(lowerCamelCase__ )
A : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self ):
A : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : Any = self.get_dummy_inputs()
A : List[str] = pipe(**lowerCamelCase__ ).images
A : List[str] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
A : Tuple = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self ):
A : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
A : Any = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : List[Any] = self.get_dummy_inputs()
A : Union[str, Any] = pipe(**lowerCamelCase__ ).images
A : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : List[str] = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self ):
A : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
A : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : Optional[Any] = self.get_dummy_inputs()
A : Optional[int] = pipe(**lowerCamelCase__ ).images
A : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : Optional[int] = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self ):
A : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
A : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : Optional[Any] = self.get_dummy_inputs()
A : int = pipe(**lowerCamelCase__ ).images
A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : Dict = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self ):
A : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
A : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : Tuple = self.get_dummy_inputs()
A : str = pipe(**lowerCamelCase__ ).images
A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A : int = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCAmelCase ( self ):
A : str = ort.SessionOptions()
A : Optional[int] = False
return options
def _lowerCAmelCase ( self ):
A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A : Optional[Any] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
A : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : str = """A fantasy landscape, trending on artstation"""
A : Union[str, Any] = torch.manual_seed(0 )
A : List[Any] = pipe(
prompt=lowerCamelCase__, image=lowerCamelCase__, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase__, output_type="""np""", )
A : List[str] = output.images
A : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A : Tuple = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCAmelCase ( self ):
A : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A : str = init_image.resize((128, 128) )
A : List[str] = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", subfolder="""scheduler""" )
A : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", scheduler=lowerCamelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : List[str] = """A fantasy landscape, trending on artstation"""
A : Union[str, Any] = torch.manual_seed(0 )
A : Union[str, Any] = pipe(
prompt=lowerCamelCase__, image=lowerCamelCase__, guidance_scale=7.5, num_inference_steps=20, generator=lowerCamelCase__, output_type="""np""", )
A : Dict = output.images
A : str = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A : Optional[int] = np.array(
[0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(matrix , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(a )
assert isinstance(v_star_dot , np.ndarray )
return (v_star_dot.dot(v )) / (v_star.dot(v ))
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(a ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(a , v ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
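# Why the last assertion holds (hand-computed): with v = (1, 2, 3)^T and the real
# symmetric matrix above, A v = (17, 5, 5)^T, so
# R(A, v) = (v* A v) / (v* v) = 42 / 14 = 3. More generally, the Rayleigh quotient
# of a Hermitian matrix is always real and is bounded below and above by the
# smallest and largest eigenvalues of the matrix.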
| 662 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=7, lowerCamelCase__=3, lowerCamelCase__=18, lowerCamelCase__=30, lowerCamelCase__=400, lowerCamelCase__=True, lowerCamelCase__=32, lowerCamelCase__=True, ):
A : Tuple = parent
A : Dict = batch_size
A : Dict = num_channels
A : Optional[Any] = image_size
A : Union[str, Any] = min_resolution
A : Dict = max_resolution
A : List[str] = do_resize
A : Union[str, Any] = size_divisor
A : Any = do_rescale
def _lowerCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = GLPNImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self ):
A : Dict = GLPNImageProcessingTester(self )
@property
def _lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_rescale""" ) )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A : Optional[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A : Union[str, Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A : Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 662 |
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
A , A : int = np.shape(_lowerCAmelCase )
if rows != columns:
A : Union[str, Any] = (
"""'table' has to be of square shaped array but got a """
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCAmelCase )
A : Union[str, Any] = np.zeros((rows, columns) )
A : Dict = np.zeros((rows, columns) )
for i in range(columns ):
for j in range(i ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(j ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A : Any = (table[i][j] - total) / upper[j][j]
A : Union[str, Any] = 1
for j in range(i , columns ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(i ) )
A : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
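# Hand-checked 2x2 example of the Doolittle factorisation above: for
# table = [[4, 3], [6, 3]] the loops give u11 = 4, u12 = 3, l21 = 6/4 = 1.5 and
# u22 = 3 - 1.5 * 3 = -1.5, i.e. L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]];
# multiplying L @ U reproduces the input, which is a cheap sanity check.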
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
A : Dict = """"""
while len(bin_string ) % 3 != 0:
A : Union[str, Any] = """0""" + bin_string
A : List[str] = [
bin_string[index : index + 3]
for index in range(len(bin_string ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
A : Dict = 0
for index, val in enumerate(bin_group ):
oct_val += int(2 ** (2 - index) * int(val ) )
oct_string += str(oct_val )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
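# Worked example of the grouping above: "1111" is left-padded to "001111" so its
# length is a multiple of 3, then split into ["001", "111"], and each 3-bit group
# maps to one octal digit (001 -> 1, 111 -> 7), giving "17"; this agrees with the
# built-ins, as the assertion below checks.
assert oct(int("""1111""", 2 ) ) == """0o17"""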
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ):
A : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
A : Any = math.ceil(val / multiple ) * multiple
return x
A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
A , A : List[Any] = get_image_size(_lowerCAmelCase )
A , A : List[Any] = output_size
# determine new height and width
A : Optional[int] = output_height / input_height
A : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A : Any = scale_width
else:
# fit height
A : int = scale_height
A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
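# Worked example of the logic above (hypothetical sizes): for a height-480,
# width-640 input resized toward a 384x384 target with keep_aspect_ratio=True,
# scale_height = 384/480 = 0.8 and scale_width = 384/640 = 0.6; 0.8 deviates less
# from 1, so the height scale is applied to both sides, giving 0.8*480 = 384 and
# 0.8*640 = 512. With multiple=32 both values already satisfy the constraint, so
# the returned size is (384, 512).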
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
A : List[str] = RobertaPreLayerNormConfig.from_pretrained(
_lowerCAmelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
A : str = torch.load(hf_hub_download(repo_id=_lowerCAmelCase , filename="""pytorch_model.bin""" ) )
A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
A : Any = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
A : Union[str, Any] = tensor_value
A : Optional[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCAmelCase , config=_lowerCAmelCase , state_dict=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
# convert tokenizer
A : str = AutoTokenizer.from_pretrained(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
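# Example invocation (the repo id comes from the help text above; the script
# filename and output directory are hypothetical):
#
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm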
| 662 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
# we need a list not a string, so do something to change the type
A : List[Any] = arr.split(""",""" )
def _lowerCAmelCase ( self ):
A : int = [int(self.array[0] )] * len(self.array )
A : Optional[Any] = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
A : Union[str, Any] = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
A : Dict = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array()
print(("""the result is:""", re))
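# Hand-checked run of the recurrence above on the input "1,-2,3,4": sum_value
# (best subarray ending at index i) evolves as [1, -1, 3, 7] and rear (best seen
# so far) as [1, 1, 3, 7], so the answer is 7, for the subarray [3, 4]. This is
# the classic Kadane recurrence written with two auxiliary arrays instead of two
# scalars.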
| 662 | 1 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = input("""Enter image url: """).strip()
print(F"""Downloading image from {url} ...""")
SCREAMING_SNAKE_CASE_:Optional[int] = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
SCREAMING_SNAKE_CASE_:Union[str, Any] = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
SCREAMING_SNAKE_CASE_:str = requests.get(image_url).content
SCREAMING_SNAKE_CASE_:int = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
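# Minimal usage sketch, assuming this config maps onto transformers' BitConfig and
# BitModel (the argument values are taken from the option lists declared above;
# the instantiation builds a randomly initialised model):
if __name__ == "__main__":
from transformers import BitConfig, BitModel
config = BitConfig(layer_type="""bottleneck""", global_padding="""SAME""" )
model = BitModel(config )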
| 662 | 1 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int | None, int | None, float]:
"""simple docstring"""
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
A : int = (low + high) // 2
A , A , A : int = max_subarray(arr , low , mid )
A , A , A : int = max_subarray(arr , mid + 1 , high )
A , A , A : Dict = max_cross_sum(arr , low , mid , high )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int, float]:
"""simple docstring"""
A , A : Tuple = float("""-inf""" ), -1
A , A : List[Any] = float("""-inf""" ), -1
A : int | float = 0
for i in range(mid , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
A : int = summ
A : int = i
A : str = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
A : Union[str, Any] = summ
A : Dict = i
return max_left, max_right, (left_sum + right_sum)
def __UpperCamelCase ( _lowerCAmelCase ) -> float:
"""simple docstring"""
A : Union[str, Any] = [randint(1 , _lowerCAmelCase ) for _ in range(_lowerCAmelCase )]
A : int = time.time()
max_subarray(_lowerCAmelCase , 0 , input_size - 1 )
A : Any = time.time()
return end - start
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Tuple = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
A : Optional[int] = [time_max_subarray(_lowerCAmelCase ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(_lowerCAmelCase , _lowerCAmelCase ):
print(_lowerCAmelCase , """\t\t""" , _lowerCAmelCase )
plt.plot(_lowerCAmelCase , _lowerCAmelCase )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
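# Quick check of the divide-and-conquer routine above (hand-computed): for
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray is [4, -1, 2, 1] with
# sum 6; each split compares the best purely-left, purely-right, and crossing
# sums, giving the recurrence T(n) = 2T(n/2) + O(n), i.e. O(n log n), versus the
# O(n) single-pass Kadane scan.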
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
| 662 | 1 |
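# A minimal sketch of the past_key_values equivalence check exercised by the
# decoder test above: an incremental pass that reuses the cache over new tokens
# should match the last positions of a full recomputation. The tiny config
# sizes and tolerance below are illustrative assumptions, not values taken
# from the rows.
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()
prefix = torch.randint(0, config.vocab_size, (1, 5))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))
with torch.no_grad():
    # first pass caches the key/value states of the prefix
    past = model(prefix, use_cache=True).past_key_values
    # full recomputation over the concatenated sequence
    full = model(torch.cat([prefix, next_tokens], dim=-1)).logits
    # incremental pass feeds only the new tokens plus the cache
    cached = model(next_tokens, past_key_values=past).logits
assert torch.allclose(full[:, -3:, :], cached, atol=1e-3)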
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
| 662 | 1 |
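# A minimal sketch of the sizing rule implemented by the image processor
# above: below 384 px the shortest edge is first rescaled by 1 / crop_pct and
# the result is centre-cropped back to (shortest_edge, shortest_edge); at
# 384 px or larger the image is warped directly to a square. The helper name
# is an illustrative assumption.
def resize_plan(shortest_edge: int, crop_pct: float = 224 / 256):
    """Return (intermediate shortest edge, final output size)."""
    if shortest_edge < 384:
        return int(shortest_edge / crop_pct), (shortest_edge, shortest_edge)
    return shortest_edge, (shortest_edge, shortest_edge)

# 224 / (224 / 256) == 256: resize the shortest edge to 256, then crop 224x224.
assert resize_plan(224) == (256, (224, 224))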
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=100, lowerCamelCase__=13, lowerCamelCase__=30, lowerCamelCase__=2, lowerCamelCase__=3, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=32, lowerCamelCase__=4, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=10, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=None, lowerCamelCase__=[0, 1, 2, 3], ):
A : str = parent
A : int = 100
A : int = batch_size
A : List[str] = image_size
A : Optional[Any] = patch_size
A : Any = num_channels
A : str = is_training
A : Any = use_labels
A : str = hidden_size
A : Tuple = num_hidden_layers
A : Tuple = num_attention_heads
A : Tuple = intermediate_size
A : Optional[int] = hidden_act
A : Tuple = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : Union[str, Any] = type_sequence_label_size
A : Any = initializer_range
A : Optional[Any] = scope
A : Optional[int] = out_indices
A : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A : Any = (image_size // patch_size) ** 2
A : Dict = num_patches + 1
def _lowerCAmelCase ( self ):
A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : int = None
A : Tuple = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : int = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
A : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self ):
return BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = BeitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = BeitForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self.type_sequence_label_size
A : List[Any] = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A : Optional[int] = 1
A : int = BeitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A : Optional[Any] = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self.num_labels
A : Tuple = BeitForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
A : Dict = model(lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self ):
A : str = self.prepare_config_and_inputs()
A , A , A , A : Any = config_and_inputs
A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : int = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Any = False
def _lowerCAmelCase ( self ):
A : Dict = BeitModelTester(self )
A : Any = ConfigTester(self, config_class=lowerCamelCase__, has_text_modality=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__, nn.Linear ) )
def _lowerCAmelCase ( self ):
A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Any = model_class(lowerCamelCase__ )
A : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[int] = [*signature.parameters.keys()]
A : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
if not self.model_tester.is_training:
return
A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]:
continue
A : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
A : Dict = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__, return_labels=lowerCamelCase__ )
A : List[Any] = model(**lowerCamelCase__ ).loss
loss.backward()
def _lowerCAmelCase ( self ):
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A : Dict = False
A : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
A : int = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
A : Optional[int] = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__, return_labels=lowerCamelCase__ )
A : Tuple = model(**lowerCamelCase__ ).loss
loss.backward()
def _lowerCAmelCase ( self ):
A , A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : int = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
A : int = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@slow
def _lowerCAmelCase ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Any = BeitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
A : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ):
A : int = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(lowerCamelCase__ )
A : Union[str, Any] = self.default_image_processor
A : Optional[Any] = prepare_img()
A : Dict = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
A : Optional[Any] = torch.ones((1, 196), dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : Dict = model(pixel_values=lowerCamelCase__, bool_masked_pos=lowerCamelCase__ )
A : int = outputs.logits
# verify the logits
A : Any = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape, lowerCamelCase__ )
A : Union[str, Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], lowerCamelCase__, atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self ):
A : Any = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(lowerCamelCase__ )
A : List[str] = self.default_image_processor
A : List[Any] = prepare_img()
A : List[str] = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : Union[str, Any] = model(**lowerCamelCase__ )
A : Tuple = outputs.logits
# verify the logits
A : str = torch.Size((1, 1000) )
self.assertEqual(logits.shape, lowerCamelCase__ )
A : str = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase__, atol=1e-4 ) )
A : Optional[int] = 281
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : List[str] = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
lowerCamelCase__ )
A : Dict = self.default_image_processor
A : List[Any] = prepare_img()
A : Union[str, Any] = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : Tuple = model(**lowerCamelCase__ )
A : Any = outputs.logits
# verify the logits
A : List[Any] = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape, lowerCamelCase__ )
A : Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase__, atol=1e-4 ) )
A : List[str] = 2396
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A : List[str] = model.to(lowerCamelCase__ )
A : str = BeitImageProcessor(do_resize=lowerCamelCase__, size=640, do_center_crop=lowerCamelCase__ )
A : Optional[Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
A : Optional[Any] = Image.open(ds[0]["""file"""] )
A : Dict = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : str = model(**lowerCamelCase__ )
A : int = outputs.logits
# verify the logits
A : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, lowerCamelCase__ )
A : Optional[Any] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
A : Any = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
], device=lowerCamelCase__, )
else:
A : Any = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
], device=lowerCamelCase__, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A : int = model.to(lowerCamelCase__ )
A : Optional[Any] = BeitImageProcessor(do_resize=lowerCamelCase__, size=640, do_center_crop=lowerCamelCase__ )
A : Dict = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""" )
A : Dict = Image.open(ds[0]["""file"""] )
A : Optional[Any] = image_processor(images=lowerCamelCase__, return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
A : Optional[int] = model(**lowerCamelCase__ )
A : List[str] = outputs.logits.detach().cpu()
A : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__, target_sizes=[(500, 300)] )
A : Any = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, lowerCamelCase__ )
A : Tuple = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
A : Dict = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, lowerCamelCase__ )
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = False if """rvlcdip""" in checkpoint_url else True
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 662 | 1 |
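# A minimal sketch of the fused-projection split performed by read_in_q_k_v in
# the conversion script above: timm-style checkpoints store one
# (3 * hidden, hidden) qkv weight, which is sliced row-wise into separate
# query, key and value matrices. The random tensor is for shapes only; note
# the original checkpoint also carries q/v biases but no key bias.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)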
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : int = 0
__lowerCamelCase : bool = False
__lowerCamelCase : float = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs(), {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(), {"""a""": 2} )
self.assertDictEqual(MockClass(a=2, b=lowerCamelCase__ ).to_kwargs(), {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2, c=2.25 ).to_kwargs(), {"""a""": 2, """c""": 2.25} )
@require_cuda
def _lowerCAmelCase ( self ):
# Check that `GradScalerKwargs` values are passed through to the underlying GradScaler.
A : List[str] = GradScalerKwargs(init_scale=1024, growth_factor=2 )
AcceleratorState._reset_state()
A : Optional[Any] = Accelerator(mixed_precision="""fp16""", kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
A : Optional[int] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale, 1024.0 )
self.assertEqual(scaler._growth_factor, 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor, 0.5 )
self.assertEqual(scaler._growth_interval, 2000 )
self.assertEqual(scaler._enabled, lowerCamelCase__ )
@require_multi_gpu
def _lowerCAmelCase ( self ):
A : Optional[Any] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__, env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
SCREAMING_SNAKE_CASE_:List[Any] = Accelerator(kwargs_handlers=[ddp_scaler])
SCREAMING_SNAKE_CASE_:List[str] = torch.nn.Linear(100, 200)
SCREAMING_SNAKE_CASE_:Optional[int] = accelerator.prepare(model)
# Check the values changed in kwargs
SCREAMING_SNAKE_CASE_:Union[str, Any] = """"""
SCREAMING_SNAKE_CASE_:List[str] = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
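# A minimal sketch of the behaviour asserted in the first kwargs test above:
# `to_kwargs` on a KwargsHandler dataclass returns only the fields that differ
# from their defaults. This hand-rolled equivalent is an illustration, not
# accelerate's actual implementation.
from dataclasses import dataclass, fields

@dataclass
class MockKwargs:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # keep only fields whose current value differs from the declared default
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert MockKwargs().to_kwargs() == {}
assert MockKwargs(a=2, b=True).to_kwargs() == {"a": 2, "b": True}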
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, ):
A : Any = parent
A : Tuple = 13
A : Dict = 7
A : Dict = True
A : str = True
A : Optional[int] = False
A : int = True
A : List[Any] = 99
A : Optional[int] = 32
A : int = 2
A : Dict = 4
A : Any = 37
A : Optional[int] = """gelu"""
A : List[Any] = 0.1
A : Dict = 0.1
A : Dict = 512
A : int = 16
A : List[str] = 2
A : Optional[int] = 0.02
A : Optional[Any] = 3
A : List[str] = 4
A : Union[str, Any] = None
def _lowerCAmelCase ( self ):
A : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A : List[Any] = None
A : Optional[int] = None
A : Dict = None
if self.use_labels:
A : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
A : List[Any] = ids_tensor([self.batch_size], self.num_choices )
A : int = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = TFDistilBertModel(config=lowerCamelCase__ )
A : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A : Union[str, Any] = model(lowerCamelCase__ )
A : int = [input_ids, input_mask]
A : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : int = TFDistilBertForMaskedLM(config=lowerCamelCase__ )
A : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[str] = TFDistilBertForQuestionAnswering(config=lowerCamelCase__ )
A : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
A : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : int = self.num_labels
A : str = TFDistilBertForSequenceClassification(lowerCamelCase__ )
A : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A : int = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self.num_choices
A : Dict = TFDistilBertForMultipleChoice(lowerCamelCase__ )
A : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase__, 1 ), (1, self.num_choices, 1) )
A : Union[str, Any] = tf.tile(tf.expand_dims(lowerCamelCase__, 1 ), (1, self.num_choices, 1) )
A : Any = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
A : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self.num_labels
A : Union[str, Any] = TFDistilBertForTokenClassification(lowerCamelCase__ )
A : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self ):
A : int = self.prepare_config_and_inputs()
A , A , A , A , A , A : Any = config_and_inputs
A : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__lowerCamelCase : List[str] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def _lowerCAmelCase ( self ):
A : List[str] = TFDistilBertModelTester(self )
A : Any = ConfigTester(self, config_class=lowerCamelCase__, dim=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A : int = TFDistilBertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : List[str] = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
A : Any = model(lowerCamelCase__ )[0]
A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowerCamelCase__, atol=1e-4 )
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
| 662 | 1 |
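# A minimal sketch of the multiple-choice input expansion used in the
# TFDistilBert test above: each example is tiled num_choices times along a new
# axis so every choice is scored in a single forward pass. Shapes below are
# illustrative.
import tensorflow as tf

batch_size, seq_len, num_choices = 2, 5, 4
input_ids = tf.zeros((batch_size, seq_len), dtype=tf.int32)
tiled = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
assert tiled.shape == (batch_size, num_choices, seq_len)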
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
A : int = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __UpperCamelCase ( _lowerCAmelCase = 100 ) -> int:
"""simple docstring"""
A : Tuple = 1
A : Dict = 2
for i in range(2 , max_n + 1 ):
A : Union[str, Any] = pre_numerator
A : Optional[Any] = 2 * i // 3 if i % 3 == 0 else 1
A : int = cur_numerator
A : Optional[int] = e_cont * pre_numerator + temp
return sum_digits(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
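# Worked check of the convergent recurrence used in the digit-sum solution
# above: the continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], and
# the numerators obey h_i = a_i * h_{i-1} + h_{i-2}. The helper name is an
# illustrative assumption.
def e_convergent_numerators(max_n: int) -> list[int]:
    pre, cur = 1, 2
    out = [cur]
    for i in range(2, max_n + 1):
        # the partial quotients follow the pattern 1, 2, 1, 1, 4, 1, 1, 6, ...
        a_i = 2 * i // 3 if i % 3 == 0 else 1
        pre, cur = cur, a_i * cur + pre
        out.append(cur)
    return out

assert e_convergent_numerators(6) == [2, 3, 8, 11, 19, 87]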
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[str] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
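# Worked check of the sqrt(2) expansion recurrence used in the solution above:
# starting from 1/1, each step maps p/q -> (p + 2q) / (p + q), producing the
# convergents 3/2, 7/5, 17/12, 41/29, ...
p, q = 1, 1
expansions = []
for _ in range(4):
    p, q = p + 2 * q, p + q
    expansions.append((p, q))
assert expansions == [(3, 2), (7, 5), (17, 12), (41, 29)]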
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE_:int = 3
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
print("""Generating primitive root of p""" )
while True:
A : str = random.randrange(3 , _lowerCAmelCase )
if pow(_lowerCAmelCase , 2 , _lowerCAmelCase ) == 1:
continue
if pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) == 1:
continue
return g
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print("""Generating prime p...""" )
A : Optional[Any] = rabin_miller.generate_large_prime(_lowerCAmelCase ) # select large prime number.
A : List[str] = primitive_root(_lowerCAmelCase ) # one primitive root on modulo p.
A : List[Any] = random.randrange(3 , _lowerCAmelCase ) # private_key -> have to be greater than 2 for safety.
A : str = cryptomath.find_mod_inverse(pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A : Optional[int] = (key_size, e_a, e_a, p)
A : List[str] = (key_size, d)
return public_key, private_key
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> None:
"""simple docstring"""
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
A , A : int = generate_key(_lowerCAmelCase )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , """w""" ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , """w""" ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print("""Making key files...""" )
make_key_files("""elgamal""" , 2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
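# Concept check: g is a primitive root mod p when g^1 .. g^(p-1) hit every nonzero
# residue. A brute-force version for small primes (the generator above instead uses
# a cheaper randomized test suited to large p):
def is_primitive_root(g: int, p: int) -> bool:
    return {pow(g, k, p) for k in range(1, p)} == set(range(1, p))

assert is_primitive_root(3, 7)       # 3^1..3^6 mod 7 = 3, 2, 6, 4, 5, 1
assert not is_primitive_root(2, 7)   # 2 only generates {2, 4, 1} mod 7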
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
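# Illustrative usage: the function above is just str.maketrans/str.translate with
# the Watson-Crick base pairing; the same table can be used standalone:
pairing = str.maketrans("ATCG", "TAGC")
assert "GTAT".translate(pairing) == "CATA"
assert "ATCGATCG".translate(pairing) == "TAGCTAGC"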
| 662 | 1 |
from __future__ import annotations
import math
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
SCREAMING_SNAKE_CASE_:List[str] = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def __UpperCamelCase ( _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A : Any = []
for num in range(len(_lowerCAmelCase ) ):
A : List[str] = 0
while 2 * i * i <= odd_composites[num]:
A : Any = odd_composites[num] - 2 * i * i
if is_prime(_lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_lowerCAmelCase ) == n:
return list_nums
return []
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
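# Worked example: Goldbach's other conjecture claims every odd composite is a prime
# plus twice a square, e.g. 9 = 7 + 2*1^2 and 33 = 31 + 2*1^2. The search above finds
# the first counterexample; a direct self-contained check that 5777 (the known
# answer) admits no such decomposition:
def is_prime_simple(n: int) -> bool:
    return n >= 2 and all(n % k for k in range(2, int(n**0.5) + 1))

def has_decomposition(n: int) -> bool:
    return any(is_prime_simple(n - 2 * i * i) for i in range(1, int((n / 2) ** 0.5) + 1))

assert has_decomposition(9) and has_decomposition(33)
assert not has_decomposition(5777)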
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
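# Illustrative structure: the trie above is a plain dict of dicts with "#" as the
# end-of-word sentinel. Building a two-word trie by hand shows the shape:
trie_sketch: dict = {}
for entry in ("dog", "deer"):
    node = trie_sketch
    for ch in entry:
        node = node.setdefault(ch, {})
    node["#"] = True

assert trie_sketch == {"d": {"o": {"g": {"#": True}}, "e": {"e": {"r": {"#": True}}}}}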
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:str = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[Any] = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check if don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A : Optional[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
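# Illustrative end-to-end usage of the helpers above, assuming the public
# accelerate API surface (argument names can differ across versions; the model id
# and the weights path are placeholders):
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    weights_location="/path/to/opt-350m/checkpoint",  # placeholder path
    bnb_quantization_config=bnb_config,
    device_map="auto",
)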
| 662 | 1 |
from collections import deque
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Dict = process_name # process name
A : Optional[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A : Dict = arrival_time
A : Tuple = burst_time # remaining burst time
A : List[Any] = 0 # total time of the process wait in ready queue
A : Optional[Any] = 0 # time from arrival time to completion time
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
# total number of mlfq's queues
A : Any = number_of_queues
# time slice of queues that round robin algorithm applied
A : Dict = time_slices
# unfinished process is in this ready_queue
A : str = queue
# current time
A : Union[str, Any] = current_time
# finished process is in this sequence queue
A : deque[Process] = deque()
def _lowerCAmelCase ( self ):
A : int = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = []
for i in range(len(lowerCamelCase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for i in range(len(lowerCamelCase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return [q.burst_time for q in queue]
def _lowerCAmelCase ( self, lowerCamelCase__ ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : deque[Process] = deque() # sequence deque of finished process
while len(lowerCamelCase__ ) != 0:
A : str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCamelCase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A : Tuple = 0
# set the process's turnaround time because it is finished
A : Dict = self.current_time - cp.arrival_time
# set the completion time
A : Optional[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase__ )
self.finish_queue.extend(lowerCamelCase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCamelCase__ ) ):
A : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCamelCase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A : Tuple = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCamelCase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A : int = 0
# set the finish time
A : List[Any] = self.current_time
# update the process' turnaround time because it is finished
A : Optional[int] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase__ )
self.finish_queue.extend(lowerCamelCase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _lowerCAmelCase ( self ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
A , A : Any = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE_:Tuple = Process("""P1""", 0, 53)
SCREAMING_SNAKE_CASE_:Any = Process("""P2""", 0, 17)
SCREAMING_SNAKE_CASE_:Optional[Any] = Process("""P3""", 0, 68)
SCREAMING_SNAKE_CASE_:Optional[int] = Process("""P4""", 0, 24)
SCREAMING_SNAKE_CASE_:str = 3
SCREAMING_SNAKE_CASE_:List[str] = [17, 25]
SCREAMING_SNAKE_CASE_:Optional[int] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE_:Optional[Any] = Process("""P1""", 0, 53)
SCREAMING_SNAKE_CASE_:List[str] = Process("""P2""", 0, 17)
SCREAMING_SNAKE_CASE_:Tuple = Process("""P3""", 0, 68)
SCREAMING_SNAKE_CASE_:Union[str, Any] = Process("""P4""", 0, 24)
SCREAMING_SNAKE_CASE_:str = 3
SCREAMING_SNAKE_CASE_:Tuple = [17, 25]
SCREAMING_SNAKE_CASE_:Union[str, Any] = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE_:List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE_:int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 662 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
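# The pattern above: each command class owns its sub-parser registration and wires
# a `func` factory via `set_defaults`, so `main` only dispatches. A minimal
# standalone sketch of the same argparse idiom:
class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: print(f"hello {args.name}"))


demo_parser = ArgumentParser("demo-cli")
HelloCommand.register_subcommand(demo_parser.add_subparsers())
demo_args = demo_parser.parse_args(["hello", "--name", "CLI"])
demo_args.func(demo_args)  # prints: hello CLI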
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = TransfoXLTokenizer
__lowerCamelCase : Dict = False
__lowerCamelCase : Tuple = False
def _lowerCAmelCase ( self ):
super().setUp()
A : List[Any] = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
A : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = """<unk> UNwanted , running"""
A : Any = """<unk> unwanted, running"""
return input_text, output_text
def _lowerCAmelCase ( self ):
A : Optional[int] = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=lowerCamelCase__ )
A : int = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(lowerCamelCase__, ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), [0, 4, 8, 7] )
def _lowerCAmelCase ( self ):
A : Dict = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ), ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def _lowerCAmelCase ( self ):
A : List[Any] = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _lowerCAmelCase ( self ):
A : Optional[int] = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
A : Any = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
A : Optional[Any] = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(lowerCamelCase__ ), lowerCamelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_tokenizer()
A : int = len(lowerCamelCase__ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""", 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCamelCase__ ), original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ), [1] )
self.assertEqual(tokenizer.decode([1] ), """new1""" )
| 662 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
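# Worked example with de-obfuscated names: greedy change walks the denominations
# from largest to smallest (hence the ascending input list) and repeatedly
# subtracts the largest coin that still fits. For 987 with the defaults above:
value, coins, picked = 987, [1, 2, 5, 10, 20, 50, 100, 500, 2000], []
for coin in reversed(coins):
    while value >= coin:
        value -= coin
        picked.append(coin)
assert picked == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]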
| 662 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
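# The shim above is the standard deprecation pattern: keep the old name importable,
# warn on construction, and delegate everything to the replacement class. A generic
# standalone sketch (both class names here are made up for illustration):
class ReplacementProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class DeprecatedExtractor(ReplacementProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DeprecatedExtractor is deprecated; use ReplacementProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)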
| 662 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. By contrast, taking a full-size model and reducing its layers and
# emb dimensions to the minimum, while keeping the full vocab + merges files, leads to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
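# Illustrative follow-up: once uploaded, the tiny checkpoint loads like any other
# model. It only exercises the machinery; the generations themselves are meaningless:
tiny_tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
tiny = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
generated = tiny.generate(**tiny_tok(["Making tiny model"], return_tensors="pt"))
print(tiny_tok.batch_decode(generated))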
| 662 | 1 |
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
        # the input arrives as a comma-separated string, so split it into a list of number strings
A : List[Any] = arr.split(""",""" )
def _lowerCAmelCase ( self ):
A : int = [int(self.array[0] )] * len(self.array )
A : Optional[Any] = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
A : Union[str, Any] = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
A : Dict = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:int = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE_:Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE_:Optional[int] = array.solve_sub_array()
    print(("""the result is:""", re))
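# The two DP arrays above implement Kadane's algorithm: sum_value[i] is the best
# sum of a subarray ending at i, and rear[i] is the best seen anywhere so far.
# The usual O(1)-space restatement:
def max_subarray_sum(nums: list[int]) -> int:
    best = current = nums[0]
    for x in nums[1:]:
        current = max(current + x, x)
        best = max(best, current)
    return best

assert max_subarray_sum([1, -2, 4, -1, 2, -5, 3]) == 5  # subarray [4, -1, 2]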
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
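# The numerical check used above is the standard one in porting scripts: run both
# models on the same input and bound the element-wise difference. The reusable core
# of that check, for any two aligned output tensors:
def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)

assert outputs_match(torch.ones(2, 3), torch.ones(2, 3) + 1e-5)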
| 662 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
SCREAMING_SNAKE_CASE_:List[Any] = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
SCREAMING_SNAKE_CASE_:Optional[Any] = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
SCREAMING_SNAKE_CASE_:List[str] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def __UpperCamelCase ( _lowerCAmelCase ) -> dict[str, int]:
"""simple docstring"""
A : str = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
return x[0]
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
A : List[Any] = get_letter_count(_lowerCAmelCase )
A : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(_lowerCAmelCase )
A : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_lowerCAmelCase )
A : str = """""".join(freq_to_letter[freq] )
A : Optional[int] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=_lowerCAmelCase , reverse=_lowerCAmelCase )
A : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Union[str, Any] = get_frequency_order(_lowerCAmelCase )
A : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
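# Illustrative usage: the match score compares a text's six most and six least
# frequent letters against English's ETAOIN...VKJXQZ ordering (score 0..12).
# Counting letters for a short sample:
from collections import Counter

sample = "DEFEND THE EAST WALL OF THE CASTLE"
counts = Counter(ch for ch in sample if ch.isalpha())
assert counts.most_common(3) == [("E", 6), ("T", 4), ("A", 3)]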
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
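A minimal usage sketch of the processor exercised above (an aside, not part of the test suite); the checkpoint id is an assumption and loading it requires network access.
import numpy as np
from PIL import Image
from transformers import BlipProcessor
processor = BlipProcessor.from_pretrained("""Salesforce/blip-image-captioning-base""")  # assumed checkpoint id
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, text="""a photo of""", return_tensors="""np""")
print(sorted(inputs.keys()))  # expect ['attention_mask', 'input_ids', 'pixel_values']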
| 662 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs ( location: str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
        job_title = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
        company_name = job.find("""span""" , {"""class""": """company"""} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
        print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloat16 if fpaa else jnp.float32
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloat16 if fpaa else jnp.float32
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNet2DConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloat16 if fpaa else jnp.float32
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.float32 )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
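A standalone illustration of the tolerance used above (an aside): bfloat16 keeps only 8 mantissa bits, so atol=1e-2 comfortably absorbs the float16-vs-bfloat16 gap at these magnitudes.
import jax.numpy as jnp
a = jnp.array([0.1514, 0.0807], dtype=jnp.bfloat16).astype(jnp.float32)
b = jnp.array([0.1514, 0.0807], dtype=jnp.float32)
assert jnp.allclose(a, b, atol=1e-2)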
| 662 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput ( BaseOutput ):
    '''simple docstring'''
    sample : torch.FloatTensor
class SCREAMING_SNAKE_CASE__ ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self, lowerCamelCase__ = 6_5536, lowerCamelCase__ = None, lowerCamelCase__ = 2, lowerCamelCase__ = 2, lowerCamelCase__ = 0, lowerCamelCase__ = "fourier", lowerCamelCase__ = True, lowerCamelCase__ = False, lowerCamelCase__ = 0.0, lowerCamelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), lowerCamelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), lowerCamelCase__ = "UNetMidBlock1D", lowerCamelCase__ = None, lowerCamelCase__ = (32, 32, 64), lowerCamelCase__ = None, lowerCamelCase__ = 8, lowerCamelCase__ = 1, lowerCamelCase__ = False, ):
super().__init__()
A : Tuple = sample_size
# time
if time_embedding_type == "fourier":
A : Optional[Any] = GaussianFourierProjection(
embedding_size=8, set_W_to_weight=lowerCamelCase__, log=lowerCamelCase__, flip_sin_to_cos=lowerCamelCase__ )
A : str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
A : Union[str, Any] = Timesteps(
block_out_channels[0], flip_sin_to_cos=lowerCamelCase__, downscale_freq_shift=lowerCamelCase__ )
A : Union[str, Any] = block_out_channels[0]
if use_timestep_embedding:
A : Union[str, Any] = block_out_channels[0] * 4
A : Tuple = TimestepEmbedding(
in_channels=lowerCamelCase__, time_embed_dim=lowerCamelCase__, act_fn=lowerCamelCase__, out_dim=block_out_channels[0], )
A : Any = nn.ModuleList([] )
A : Union[str, Any] = None
A : Tuple = nn.ModuleList([] )
A : List[str] = None
# down
A : Dict = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
A : int = output_channel
A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A : str = i == len(lowerCamelCase__ ) - 1
A : List[str] = get_down_block(
lowerCamelCase__, num_layers=lowerCamelCase__, in_channels=lowerCamelCase__, out_channels=lowerCamelCase__, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
self.down_blocks.append(lowerCamelCase__ )
# mid
A : List[Any] = get_mid_block(
lowerCamelCase__, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=lowerCamelCase__, add_downsample=lowerCamelCase__, )
# up
A : Any = list(reversed(lowerCamelCase__ ) )
A : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
A : str = out_channels
else:
A : str = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
A : int = output_channel
A : str = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
A : str = i == len(lowerCamelCase__ ) - 1
A : str = get_up_block(
lowerCamelCase__, num_layers=lowerCamelCase__, in_channels=lowerCamelCase__, out_channels=lowerCamelCase__, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
self.up_blocks.append(lowerCamelCase__ )
A : Optional[Any] = output_channel
# out
A : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32 )
A : Tuple = get_out_block(
out_block_type=lowerCamelCase__, num_groups_out=lowerCamelCase__, embed_dim=block_out_channels[0], out_channels=lowerCamelCase__, act_fn=lowerCamelCase__, fc_dim=block_out_channels[-1] // 4, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = True, ):
A : Union[str, Any] = timestep
if not torch.is_tensor(lowerCamelCase__ ):
A : List[str] = torch.tensor([timesteps], dtype=torch.long, device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
A : int = timesteps[None].to(sample.device )
A : List[Any] = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
A : Union[str, Any] = self.time_mlp(lowerCamelCase__ )
else:
A : int = timestep_embed[..., None]
A : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A : Optional[int] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
A : int = ()
for downsample_block in self.down_blocks:
A , A : Dict = downsample_block(hidden_states=lowerCamelCase__, temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A : str = self.mid_block(lowerCamelCase__, lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A : Dict = down_block_res_samples[-1:]
A : List[str] = down_block_res_samples[:-1]
A : Optional[int] = upsample_block(lowerCamelCase__, res_hidden_states_tuple=lowerCamelCase__, temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
A : Optional[Any] = self.out_block(lowerCamelCase__, lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNet1DOutput(sample=lowerCamelCase__ )
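A standalone sketch of the broadcast performed in step 1 of the forward pass above: a (batch, embed_dim) timestep embedding is expanded to (batch, embed_dim, length) so it can be combined with the 1-D sample (shapes here are illustrative).
import torch
sample = torch.randn(2, 14, 128)                               # (batch, channels, length)
timestep_embed = torch.randn(2, 32)                            # (batch, embed_dim)
timestep_embed = timestep_embed[..., None]                     # (2, 32, 1)
timestep_embed = timestep_embed.repeat(1, 1, sample.shape[2])  # (2, 32, 128)
print(timestep_embed.shape)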
| 662 |
from typing import Any
import numpy as np
def is_hermitian ( matrix: np.ndarray ) -> bool:
    """simple docstring"""
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient ( a: np.ndarray , v: np.ndarray ) -> Any:
    """simple docstring"""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests ( ) -> None:
    """simple docstring"""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
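A quick numeric aside: for a Hermitian matrix the Rayleigh quotient is always bounded by the smallest and largest eigenvalues, which gives a cheap sanity check.
a = np.array([[2.0, 1.0], [1.0, 3.0]])
v = np.array([[1.0], [1.0]])
r = rayleigh_quotient(a, v).item()
w = np.linalg.eigvalsh(a)
assert w[0] <= r <= w[-1]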
| 662 | 1 |
def jaro_winkler ( stra: str , strb: str ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra: str , _strb: str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                # blank out the matched character so it is not matched twice
                _strb = f'''{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'''
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
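The classic textbook pair for this metric (an illustrative aside): six matching characters with one transposition and a shared 3-character prefix.
print(jaro_winkler("""martha""", """marhta"""))  # ~0.9611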
| 662 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition ( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
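A sanity check for the decomposition as repaired above (an aside): the two factors should multiply back to the input matrix.
table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(table)
assert np.allclose(lower @ upper, table)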
| 662 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 662 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ):
A : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
A : Any = math.ceil(val / multiple ) * multiple
return x
A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
A , A : List[Any] = get_image_size(_lowerCAmelCase )
A , A : List[Any] = output_size
# determine new height and width
A : Optional[int] = output_height / input_height
A : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A : Any = scale_width
else:
# fit height
A : int = scale_height
A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
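A worked example of the keep_aspect_ratio path above, using hypothetical numbers: a 480x640 image resized toward 384x384 with ensure_multiple_of=32 keeps the height scale and rounds both sides to multiples of 32.
scale_height, scale_width = 384 / 480, 384 / 640   # 0.8 and 0.6
# abs(1 - 0.6) >= abs(1 - 0.8), so the height scale is applied to both sides
new_height = round(0.8 * 480 / 32) * 32            # 384
new_width = round(0.8 * 640 / 32) * 32             # 512
print(new_height, new_width)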
| 662 | 1 |
def actual_power ( a: int , b: int ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power ( a: int , b: int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
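Usage sketch for the repaired functions above; note that int(b / 2) truncates toward zero, which is what lets the recursion terminate for negative exponents as well.
print(power(2, 10))   # 1024
print(power(2, -3))   # 0.125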
| 662 |
class SubArray:
    '''simple docstring'''
    def __init__( self, arr ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(""",""" )
    def solve_sub_array( self ):
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1, len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
            rear[i] = max(sum_value[i], rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("""the results is:""", re))
| 662 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences ( sequence: list[Any] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree ( sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["""A""", """B""", """C"""])
    generate_all_subsequences(seq)
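Aside: the exclude/include branching enumerates all 2**n subsequences; for a two-element input the depth-first order is [], [2], [1], [1, 2].
generate_all_subsequences([1, 2])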
| 662 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
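A hedged usage sketch against the real transformers.BitConfig that this snippet mirrors (assuming the installed version exposes it): lower-case padding strings are normalized by the upper() check in __init__ above.
from transformers import BitConfig
config = BitConfig(layer_type="""bottleneck""", global_padding="""same""")
print(config.global_padding)  # "SAME"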
| 662 | 1 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
SCREAMING_SNAKE_CASE_:Optional[Any] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
SCREAMING_SNAKE_CASE_:List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
SCREAMING_SNAKE_CASE_:int = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
SCREAMING_SNAKE_CASE_:str = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
SCREAMING_SNAKE_CASE_:Any = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
SCREAMING_SNAKE_CASE_:int = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
SCREAMING_SNAKE_CASE_:List[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
SCREAMING_SNAKE_CASE_:List[str] = np.expand_dims(test_image, axis=0)
SCREAMING_SNAKE_CASE_:List[Any] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
SCREAMING_SNAKE_CASE_:Optional[int] = """Normal"""
if result[0][0] == 1:
SCREAMING_SNAKE_CASE_:str = """Abnormality detected"""
| 662 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=50, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=None, ):
A : List[str] = parent
A : List[str] = batch_size
A : Optional[int] = seq_length
A : Optional[int] = is_training
A : Tuple = use_input_mask
A : Optional[Any] = vocab_size
A : str = hidden_size
A : Any = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : int = hidden_act
A : Dict = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : int = initializer_range
A : Tuple = use_labels
A : List[str] = scope
def _lowerCAmelCase ( self ):
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : int = None
if self.use_input_mask:
A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCAmelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self ):
A , A , A , A : List[Any] = self.prepare_config_and_inputs()
A : Any = True
A : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : str = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, )
A : Optional[Any] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__, ):
A : Union[str, Any] = True
A : Optional[int] = True
A : Optional[int] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
A : int = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, use_cache=lowerCamelCase__, )
A : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
A : List[str] = torch.cat([input_ids, next_tokens], dim=-1 )
A : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1 )
A : List[str] = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
A : Any = model(
lowerCamelCase__, attention_mask=lowerCamelCase__, encoder_hidden_states=lowerCamelCase__, encoder_attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, output_hidden_states=lowerCamelCase__, )["""hidden_states"""][0]
# select random slice
A : Any = ids_tensor((1,), output_from_past.shape[-1] ).item()
A : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
A : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, ):
A : Optional[int] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self ):
A , A , A , A : str = self.prepare_config_and_inputs()
A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self ):
A : Any = BertGenerationEncoderTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A , A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A : Any = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
# This regression test was failing with PyTorch < 1.3
A , A , A , A , A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
A : int = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, )
def _lowerCAmelCase ( self ):
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : Optional[int] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Union[str, Any] = model(lowerCamelCase__ )[0]
A : List[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A : Dict = model(lowerCamelCase__ )[0]
A : List[str] = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[Any] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCamelCase__, atol=1e-4 ) )
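A download-free smoke-test sketch (an aside): the same classes with a tiny random config, mirroring the tester dimensions used above.
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = BertGenerationEncoder(config).eval()
with torch.no_grad():
    out = model(torch.randint(0, 99, (1, 8)))
print(out.last_hidden_state.shape)  # torch.Size([1, 8, 32])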
| 662 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : int = ["vqvae"]
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
super().__init__()
self.register_modules(unet=lowerCamelCase__, scheduler=lowerCamelCase__, mel=lowerCamelCase__, vqvae=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
return 50 if isinstance(self.scheduler, lowerCamelCase__ ) else 1000
@torch.no_grad()
def __call__( self, lowerCamelCase__ = 1, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = 0, lowerCamelCase__ = 0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = 0, lowerCamelCase__ = 0, lowerCamelCase__ = None, lowerCamelCase__ = 0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__=True, ):
A : List[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCamelCase__ )
A : str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
), generator=lowerCamelCase__, device=self.device, )
A : Tuple = noise
A : Union[str, Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = self.mel.audio_slice_to_image(lowerCamelCase__ )
A : Optional[int] = np.frombuffer(input_image.tobytes(), dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A : Any = (input_image / 255) * 2 - 1
A : List[str] = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A : Tuple = self.vqvae.encode(torch.unsqueeze(lowerCamelCase__, 0 ) ).latent_dist.sample(
generator=lowerCamelCase__ )[0]
A : Optional[int] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A : Optional[int] = self.scheduler.add_noise(lowerCamelCase__, lowerCamelCase__, self.scheduler.timesteps[start_step - 1] )
A : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A : Optional[Any] = int(mask_start_secs * pixels_per_second )
A : List[str] = int(mask_end_secs * pixels_per_second )
A : Tuple = self.scheduler.add_noise(lowerCamelCase__, lowerCamelCase__, torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet, lowerCamelCase__ ):
A : int = self.unet(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )["""sample"""]
else:
A : List[Any] = self.unet(lowerCamelCase__, lowerCamelCase__ )["""sample"""]
if isinstance(self.scheduler, lowerCamelCase__ ):
A : Union[str, Any] = self.scheduler.step(
model_output=lowerCamelCase__, timestep=lowerCamelCase__, sample=lowerCamelCase__, eta=lowerCamelCase__, generator=lowerCamelCase__, )["""prev_sample"""]
else:
A : Any = self.scheduler.step(
model_output=lowerCamelCase__, timestep=lowerCamelCase__, sample=lowerCamelCase__, generator=lowerCamelCase__, )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A : List[str] = mask[:, step, :, :mask_start]
if mask_end > 0:
A : Tuple = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A : Any = 1 / self.vqvae.config.scaling_factor * images
A : List[str] = self.vqvae.decode(lowerCamelCase__ )["""sample"""]
A : Dict = (images / 2 + 0.5).clamp(0, 1 )
A : int = images.cpu().permute(0, 2, 3, 1 ).numpy()
A : Union[str, Any] = (images * 255).round().astype("""uint8""" )
A : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCamelCase__, mode="""RGB""" ).convert("""L""" ) for _ in images) )
A : Dict = [self.mel.image_to_audio(lowerCamelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase__ )[:, np.newaxis, :] ), **ImagePipelineOutput(lowerCamelCase__ ) )
@torch.no_grad()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = 50 ):
assert isinstance(self.scheduler, lowerCamelCase__ )
self.scheduler.set_timesteps(lowerCamelCase__ )
A : Optional[int] = np.array(
[np.frombuffer(image.tobytes(), dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A : Optional[Any] = (sample / 255) * 2 - 1
A : int = torch.Tensor(lowerCamelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,) ) ):
A : List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A : List[Any] = self.scheduler.alphas_cumprod[t]
A : str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A : Union[str, Any] = 1 - alpha_prod_t
A : int = self.unet(lowerCamelCase__, lowerCamelCase__ )["""sample"""]
A : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A : int = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Tuple = acos(torch.dot(torch.flatten(lowerCamelCase__ ), torch.flatten(lowerCamelCase__ ) ) / torch.norm(lowerCamelCase__ ) / torch.norm(lowerCamelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowerCamelCase__ ) + sin(alpha * theta ) * xa / sin(lowerCamelCase__ )
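A standalone check of the spherical interpolation used in the static method above (an aside): at alpha = 0.5 between two orthogonal unit vectors the result keeps unit norm.
import torch
from math import acos, sin
xa = torch.tensor([1.0, 0.0])
xb = torch.tensor([0.0, 1.0])
theta = acos(torch.dot(xa, xb) / torch.norm(xa) / torch.norm(xb))
mid = sin(0.5 * theta) * xa / sin(theta) + sin(0.5 * theta) * xb / sin(theta)
print(torch.norm(mid))  # ~1.0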
| 662 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 384}
A : Optional[Any] = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Dict = size
# Default value set here for backwards compatibility where the value in config is None
A : Dict = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : List[str] = do_rescale
A : Tuple = rescale_factor
A : Optional[int] = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Tuple = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
A : List[str] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : int = int(shortest_edge / crop_pct )
A : List[Any] = get_resize_output_image_size(lowerCamelCase__, size=lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase__, **lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__, size=(shortest_edge, shortest_edge), resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A : str = resample if resample is not None else self.resample
A : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : Optional[Any] = image_std if image_std is not None else self.image_std
A : Optional[Any] = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__, default_to_square=lowerCamelCase__ )
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Any = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, crop_pct=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : str = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Tuple = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
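A worked example of the crop_pct branch above (plain arithmetic): with the default crop_pct of 224 / 256 = 0.875, a requested shortest edge of 224 is first resized to int(224 / 0.875) = 256 and then center-cropped back to 224x224.
print(int(224 / (224 / 256)))  # 256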
| 662 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
__lowerCamelCase : List[str] = PegasusConfig
__lowerCamelCase : Union[str, Any] = {}
__lowerCamelCase : Tuple = "gelu"
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=2, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=40, lowerCamelCase__=2, lowerCamelCase__=1, lowerCamelCase__=0, ):
A : Tuple = parent
A : Optional[Any] = batch_size
A : Any = seq_length
A : int = is_training
A : Union[str, Any] = use_labels
A : Any = vocab_size
A : Tuple = hidden_size
A : Optional[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Any = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : str = max_position_embeddings
A : Dict = eos_token_id
A : Optional[Any] = pad_token_id
A : Optional[Any] = bos_token_id
def _lowerCAmelCase ( self ):
A : Tuple = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
A : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
A : Tuple = tf.concat([input_ids, eos_tensor], axis=1 )
A : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[str] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
A : Optional[int] = prepare_pegasus_inputs_dict(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return config, inputs_dict
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : int = TFPegasusModel(config=lowerCamelCase__ ).get_decoder()
A : Optional[Any] = inputs_dict["""input_ids"""]
A : Union[str, Any] = input_ids[:1, :]
A : List[Any] = inputs_dict["""attention_mask"""][:1, :]
A : Union[str, Any] = inputs_dict["""head_mask"""]
A : Optional[int] = 1
# first forward pass
A : str = model(lowerCamelCase__, attention_mask=lowerCamelCase__, head_mask=lowerCamelCase__, use_cache=lowerCamelCase__ )
A , A : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
A : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
# append to next input_ids and
A : Optional[int] = tf.concat([input_ids, next_tokens], axis=-1 )
A : Optional[int] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
A : List[str] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )[0]
A : Optional[Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
A : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
A : Dict = output_from_no_past[:, -3:, random_slice_idx]
A : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase__, lowerCamelCase__, rtol=1e-3 )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
A : Union[str, Any] = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCamelCase : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : Optional[Any] = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : int = True
__lowerCamelCase : Dict = False
__lowerCamelCase : Optional[int] = False
def _lowerCAmelCase ( self ):
A : Union[str, Any] = TFPegasusModelTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__lowerCamelCase : List[Any] = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCamelCase : Optional[Any] = "google/pegasus-xsum"
@cached_property
def _lowerCAmelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowerCAmelCase ( self ):
A : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : List[Any] = self.translate_src_text(**lowerCamelCase__ )
assert self.expected_text == generated_words
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Any = self.tokenizer(self.src_text, **lowerCamelCase__, padding=lowerCamelCase__, return_tensors="""tf""" )
A : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase__, )
A : str = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase__ )
return generated_words
@slow
def _lowerCAmelCase ( self ):
self._assert_generated_batch_equal_expected()
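# A minimal, framework-free sketch of the property the cache test above checks:
# decoding with cached state over only the new tokens must match a full forward
# pass over the concatenated sequence. The toy "decoder" below is illustrative
# only (not part of the test); it uses a running prefix sum as its cache, so
# the equivalence holds exactly.
import numpy as np

def toy_decoder(tokens, past=None):
    start = 0.0 if past is None else past
    outputs = np.cumsum(tokens) + start  # causal: step t only sees tokens <= t
    return outputs, outputs[-1]          # (per-step outputs, new cache)

prefix = np.array([1.0, 2.0, 3.0])
new_tokens = np.array([4.0, 5.0])

full, _ = toy_decoder(np.concatenate([prefix, new_tokens]))  # no cache
_, cache = toy_decoder(prefix)                               # warm the cache
incremental, _ = toy_decoder(new_tokens, past=cache)         # cached pass

assert np.allclose(full[-2:], incremental)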
| 662 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Tuple = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
A : Dict = """backbone.""" if is_semantic else """"""
A : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A : Dict = """backbone.""" if is_semantic else """"""
# queries, keys and values
A : Union[str, Any] = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A : Tuple = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A : Optional[int] = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A : int = in_proj_weight[
: config.hidden_size, :
]
A : Any = q_bias
A : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
A : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A : str = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A : List[Any] = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A : Dict = gamma_a
A : Dict = gamma_a
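# Hedged sketch of the fused-QKV split performed above: the original checkpoint
# stores one (3 * hidden_size, hidden_size) projection, and the loop slices it
# into equal thirds for query, key and value. Shapes below are assumed for
# illustration only.
import numpy as np

hidden_size = 4
in_proj_weight = np.arange(3 * hidden_size * hidden_size, dtype=np.float32)
in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                 # first third
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle third
value = in_proj_weight[-hidden_size:, :]                # last third

assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)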
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : List[str] = dct.pop(_lowerCAmelCase )
A : Optional[Any] = val
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
"""simple docstring"""
A : Dict = """rvlcdip""" not in checkpoint_url
A : Union[str, Any] = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A : Dict = 1024
A : List[Any] = 4096
A : int = 24
A : int = 16
# labels
if "rvlcdip" in checkpoint_url:
A : List[Any] = 16
A : List[Any] = """huggingface/label-files"""
A : int = """rvlcdip-id2label.json"""
A : Dict = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
A : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A : int = idalabel
A : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A : List[str] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : str = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
A : Any = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
A : int = prepare_img()
A : Tuple = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
A : str = encoding["""pixel_values"""]
A : Tuple = model(_lowerCAmelCase )
A : Optional[int] = outputs.logits
# verify logits
A : Tuple = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
A : Any = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
A : List[Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
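# Hypothetical invocation of the conversion script above (the script's file
# name is assumed; the checkpoint URL is the script's own default):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub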
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:Tuple = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:List[str] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Tuple = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
SCREAMING_SNAKE_CASE_:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
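# Minimal stand-in for the lazy-import pattern wired up above. This is NOT the
# real `_LazyModule`, just a sketch of the idea: `import package` stays cheap
# because heavy submodules are only imported on first attribute access.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is hit only once
        return value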
| 662 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""", lowerCamelCase__, )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
| 662 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ):
A : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
A : Any = math.ceil(val / multiple ) * multiple
return x
A : Optional[Any] = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size
A , A : List[Any] = get_image_size(_lowerCAmelCase )
A , A : List[Any] = output_size
# determine new height and width
A : Optional[int] = output_height / input_height
A : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A : Any = scale_width
else:
# fit height
A : int = scale_height
A : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase )
A : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase )
return (new_height, new_width)
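# Worked example of the resize rule above (numbers are illustrative): a
# 480x640 image resized toward 384x384 with keep_aspect_ratio=True and
# multiple=32. The scale closest to 1 wins ("scale as little as possible"),
# then both sides are snapped to the nearest multiple of 32.
output_h = output_w = 384
input_h, input_w, multiple = 480, 640, 32

scale_h, scale_w = output_h / input_h, output_w / input_w            # 0.8, 0.6
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h  # 0.8

new_h = round(scale * input_h / multiple) * multiple  # 384
new_w = round(scale * input_w / multiple) * multiple  # 512
assert (new_h, new_w) == (384, 512)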
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ["pixel_values"]
def __init__( self, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = PILImageResampling.BILINEAR, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = True, lowerCamelCase__ = 1 / 255, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
A : int = size if size is not None else {"""height""": 384, """width""": 384}
A : str = get_size_dict(lowerCamelCase__ )
A : Optional[Any] = do_resize
A : Optional[int] = size
A : Union[str, Any] = keep_aspect_ratio
A : int = ensure_multiple_of
A : Dict = resample
A : Optional[Any] = do_rescale
A : Any = rescale_factor
A : str = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = 1, lowerCamelCase__ = PILImageResampling.BICUBIC, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Dict = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
A : Optional[Any] = get_resize_output_image_size(
lowerCamelCase__, output_size=(size["""height"""], size["""width"""]), keep_aspect_ratio=lowerCamelCase__, multiple=lowerCamelCase__, )
return resize(lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return rescale(lowerCamelCase__, scale=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = None, **lowerCamelCase__, ):
return normalize(lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__, data_format=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = ChannelDimension.FIRST, **lowerCamelCase__, ):
A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A : str = size if size is not None else self.size
A : str = get_size_dict(lowerCamelCase__ )
A : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A : Tuple = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A : int = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : Optional[int] = image_std if image_std is not None else self.image_std
A : Any = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A : str = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
A : Dict = [self.resize(image=lowerCamelCase__, size=lowerCamelCase__, resample=lowerCamelCase__ ) for image in images]
if do_rescale:
A : Optional[Any] = [self.rescale(image=lowerCamelCase__, scale=lowerCamelCase__ ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=lowerCamelCase__, mean=lowerCamelCase__, std=lowerCamelCase__ ) for image in images]
A : Dict = [to_channel_dimension_format(lowerCamelCase__, lowerCamelCase__ ) for image in images]
A : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase__ ):
A : int = target_sizes.numpy()
A : Union[str, Any] = []
for idx in range(len(lowerCamelCase__ ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
A : List[str] = logits.argmax(dim=1 )
A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 662 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, )
A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths}
A : str = Text(
cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, )
def _lowerCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
A : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A : List[str] = None
A : Dict = None
A : Tuple = None
A : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, )
A : List[str] = self.builder.as_dataset(
split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory )
return dataset
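# Hedged usage sketch of the reader above, as exposed through the public
# `datasets` API (the file path is illustrative):
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds[0]["text"])  # each row holds one line of the file under the "text" key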
| 662 | 1 |
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 662 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
SCREAMING_SNAKE_CASE_:int = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
SCREAMING_SNAKE_CASE_:Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 662 |
def __UpperCamelCase ( _lowerCAmelCase = 1000 ) -> int:
"""simple docstring"""
A , A : str = 1, 1
A : List[Any] = []
for i in range(1 , n + 1 ):
A : Optional[int] = prev_numerator + 2 * prev_denominator
A : Any = prev_numerator + prev_denominator
if len(str(_lowerCAmelCase ) ) > len(str(_lowerCAmelCase ) ):
result.append(_lowerCAmelCase )
A : int = numerator
A : int = denominator
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 662 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
'''simple docstring'''
def __init__( self, lowerCamelCase__=None, **lowerCamelCase__ ):
super().__init__(features=lowerCamelCase__ )
A : Any = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowerCAmelCase ( self, lowerCamelCase__ ):
import torch
if isinstance(lowerCamelCase__, lowerCamelCase__ ) and column:
if all(
isinstance(lowerCamelCase__, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCamelCase__ )
return column
def _lowerCAmelCase ( self, lowerCamelCase__ ):
import torch
if isinstance(lowerCamelCase__, (str, bytes, type(lowerCamelCase__ )) ):
return value
elif isinstance(lowerCamelCase__, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
A : Optional[Any] = {}
if isinstance(lowerCamelCase__, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
A : List[str] = {"""dtype""": torch.intaa}
elif isinstance(lowerCamelCase__, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
A : str = {"""dtype""": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase__, PIL.Image.Image ):
A : Optional[Any] = np.asarray(lowerCamelCase__ )
return torch.tensor(lowerCamelCase__, **{**default_dtype, **self.torch_tensor_kwargs} )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCamelCase__, """__array__""" ) and not isinstance(lowerCamelCase__, torch.Tensor ):
A : Any = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase__, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase__, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return map_nested(self._recursive_tensorize, lowerCamelCase__, map_list=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[int] = self.numpy_arrow_extractor().extract_row(lowerCamelCase__ )
A : Optional[int] = self.python_features_decoder.decode_row(lowerCamelCase__ )
return self.recursive_tensorize(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : str = self.numpy_arrow_extractor().extract_column(lowerCamelCase__ )
A : List[Any] = self.python_features_decoder.decode_column(lowerCamelCase__, pa_table.column_names[0] )
A : Optional[Any] = self.recursive_tensorize(lowerCamelCase__ )
A : Any = self._consolidate(lowerCamelCase__ )
return column
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = self.numpy_arrow_extractor().extract_batch(lowerCamelCase__ )
A : Dict = self.python_features_decoder.decode_batch(lowerCamelCase__ )
A : Any = self.recursive_tensorize(lowerCamelCase__ )
for column_name in batch:
A : Tuple = self._consolidate(batch[column_name] )
return batch
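# Hedged usage sketch of what the formatter above powers in `datasets`:
# switching a dataset's format to "torch" makes indexing return torch tensors,
# with dtypes following the rules above (integer arrays -> int64, floating
# arrays -> float32). Requires `datasets` and `torch` to be installed.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]})
ds = ds.with_format("torch")

row = ds[0]
print(type(row["x"]), row["x"].dtype)  # torch.Tensor, torch.int64
print(row["y"].dtype)                  # torch.float32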
| 662 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 1 |
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
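# Usage sketch of the complement function above, with the obfuscated names
# inlined for readability (the logic is identical to the snippet):
import re

def dna_complement(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))

print(dna_complement("GCTA"))  # CGAT  (A<->T, C<->G)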
| 662 |
from __future__ import annotations
SCREAMING_SNAKE_CASE_:Tuple = """#"""
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : dict = {}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self._trie
for char in text:
if char not in trie:
A : str = {}
A : str = trie[char]
A : Optional[int] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Dict = self._trie
for char in prefix:
if char in trie:
A : Optional[Any] = trie[char]
else:
return []
return self._elements(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = []
for c, v in d.items():
A : List[Any] = [""" """] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
result.extend(lowerCamelCase__ )
return tuple(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_:Any = Trie()
SCREAMING_SNAKE_CASE_:Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple:
"""simple docstring"""
A : List[str] = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 662 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : str = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
A : str = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : str = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
A : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
A : Optional[int] = tempfile.mkdtemp()
A : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : Optional[Any] = os.path.join(self.tmpdirname, lowerCamelCase__ )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.feature_extraction_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
# load decoder from hub
A : Union[str, Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : str = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCamelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_tokenizer()
A : Union[str, Any] = self.get_feature_extractor()
A : List[Any] = self.get_decoder()
A : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure the additional decoder kwargs are applied when loading from pretrained
A : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCamelCase__, """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCamelCase__, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_feature_extractor()
A : Dict = self.get_tokenizer()
A : Optional[int] = self.get_decoder()
A : str = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : List[str] = floats_list((3, 1000) )
A : Optional[Any] = feature_extractor(lowerCamelCase__, return_tensors="""np""" )
A : Dict = processor(lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : Any = self.get_feature_extractor()
A : Optional[Any] = self.get_tokenizer()
A : List[str] = self.get_decoder()
A : int = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : Tuple = """This is a test string"""
A : List[str] = processor(text=lowerCamelCase__ )
A : Optional[int] = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self, lowerCamelCase__=(2, 10, 16), lowerCamelCase__=77 ):
np.random.seed(lowerCamelCase__ )
return np.random.rand(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_feature_extractor()
A : Tuple = self.get_tokenizer()
A : Any = self.get_decoder()
A : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : List[str] = self._get_dummy_logits(shape=(10, 16), seed=13 )
A : List[str] = processor.decode(lowerCamelCase__ )
A : Tuple = decoder.decode_beams(lowerCamelCase__ )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual("""</s> <s> </s>""", decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Union[str, Any] = self.get_feature_extractor()
A : Union[str, Any] = self.get_tokenizer()
A : List[Any] = self.get_decoder()
A : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A : Union[str, Any] = processor.batch_decode(lowerCamelCase__ )
else:
with get_context(lowerCamelCase__ ).Pool() as pool:
A : int = processor.batch_decode(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = list(lowerCamelCase__ )
with get_context("""fork""" ).Pool() as p:
A : int = decoder.decode_beams_batch(lowerCamelCase__, lowerCamelCase__ )
A , A , A : Dict = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCamelCase__, decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text )
self.assertListEqual(lowerCamelCase__, decoded_processor.logit_score )
self.assertListEqual(lowerCamelCase__, decoded_processor.lm_score )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_feature_extractor()
A : Dict = self.get_tokenizer()
A : List[str] = self.get_decoder()
A : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : Tuple = self._get_dummy_logits()
A : int = 15
A : List[Any] = -20.0
A : Optional[int] = -4.0
A : Dict = processor.batch_decode(
lowerCamelCase__, beam_width=lowerCamelCase__, beam_prune_logp=lowerCamelCase__, token_min_logp=lowerCamelCase__, )
A : List[str] = decoded_processor_out.text
A : Tuple = list(lowerCamelCase__ )
with get_context("""fork""" ).Pool() as pool:
A : str = decoder.decode_beams_batch(
lowerCamelCase__, lowerCamelCase__, beam_width=lowerCamelCase__, beam_prune_logp=lowerCamelCase__, token_min_logp=lowerCamelCase__, )
A : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
A : str = [d[0][2] for d in decoded_decoder_out]
A : Any = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""], lowerCamelCase__ )
self.assertTrue(np.array_equal(lowerCamelCase__, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], lowerCamelCase__, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCamelCase__, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474], lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self ):
A : str = self.get_feature_extractor()
A : Union[str, Any] = self.get_tokenizer()
A : int = self.get_decoder()
A : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : int = self._get_dummy_logits()
A : Dict = 2.0
A : Optional[Any] = 5.0
A : Optional[Any] = -20.0
A : Tuple = True
A : List[Any] = processor.batch_decode(
lowerCamelCase__, alpha=lowerCamelCase__, beta=lowerCamelCase__, unk_score_offset=lowerCamelCase__, lm_score_boundary=lowerCamelCase__, )
A : Optional[int] = decoded_processor_out.text
A : Tuple = list(lowerCamelCase__ )
decoder.reset_params(
alpha=lowerCamelCase__, beta=lowerCamelCase__, unk_score_offset=lowerCamelCase__, lm_score_boundary=lowerCamelCase__, )
with get_context("""fork""" ).Pool() as pool:
A : List[Any] = decoder.decode_beams_batch(
lowerCamelCase__, lowerCamelCase__, )
A : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""], lowerCamelCase__ )
A : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
A : Tuple = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A : Dict = os.listdir(lowerCamelCase__ )
A : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
A : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase__ )
A : str = processor.decoder.model_container[processor.decoder._model_key]
A : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A : str = os.listdir(lowerCamelCase__ )
A : List[str] = os.listdir(lowerCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : List[Any] = floats_list((3, 1000) )
A : List[Any] = processor_wavaveca(lowerCamelCase__, return_tensors="""np""" )
A : Union[str, Any] = processor_auto(lowerCamelCase__, return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
A : Dict = self._get_dummy_logits()
A : Optional[Any] = processor_wavaveca.batch_decode(lowerCamelCase__ )
A : str = processor_auto.batch_decode(lowerCamelCase__ )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def _lowerCAmelCase ( self ):
A : str = self.get_feature_extractor()
A : Any = self.get_tokenizer()
A : int = self.get_decoder()
A : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
A : str = [d[key] for d in offsets]
return retrieved_list
def _lowerCAmelCase ( self ):
A : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Optional[Any] = self._get_dummy_logits()[0]
A : Dict = processor.decode(lowerCamelCase__, output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase__, lowerCamelCase__ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""], """word""" ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """end_offset""" ), [1, 3, 5] )
def _lowerCAmelCase ( self ):
A : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Optional[Any] = self._get_dummy_logits()
A : int = processor.batch_decode(lowerCamelCase__, output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase__, lowerCamelCase__ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ) for o in outputs["""word_offsets"""]], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """end_offset""" ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCAmelCase ( self ):
import torch
A : Dict = load_dataset("""common_voice""", """en""", split="""train""", streaming=lowerCamelCase__ )
A : List[str] = ds.cast_column("""audio""", datasets.Audio(sampling_rate=1_6000 ) )
A : List[str] = iter(lowerCamelCase__ )
A : int = next(lowerCamelCase__ )
A : List[Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
A : Optional[Any] = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A : Tuple = processor(sample["""audio"""]["""array"""], return_tensors="""pt""" ).input_values
with torch.no_grad():
A : Optional[Any] = model(lowerCamelCase__ ).logits.cpu().numpy()
A : Any = processor.decode(logits[0], output_word_offsets=lowerCamelCase__ )
A : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A : List[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
A : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ), lowerCamelCase__ )
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ), output.text )
# output times
A : Dict = torch.tensor(self.get_from_offsets(lowerCamelCase__, """start_time""" ) )
A : List[str] = torch.tensor(self.get_from_offsets(lowerCamelCase__, """end_time""" ) )
# fmt: off
A : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
A : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=0.01 ) )
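# Hedged sketch of the end-to-end flow the tests above exercise: feature
# extraction, CTC logits, then LM-boosted beam-search decoding. The checkpoint
# name follows the one used in the tests; the audio is a placeholder array.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()

# batch_decode runs pyctcdecode's beam search with the bundled n-gram LM
transcription = processor.batch_decode(logits).text
print(transcription)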
| 662 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = bnb_quantization_config.load_in_abit
A : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A : Any = []
# custom device map
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(device_map.keys() ) > 1:
A : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A : int = get_keys_to_not_convert(_lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_lowerCAmelCase )
A : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A : Dict = []
A : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_lowerCAmelCase )
# compatibility with peft
A : Union[str, Any] = load_in_abit
A : Tuple = load_in_abit
A : List[str] = get_parameter_device(_lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A : Optional[int] = replace_with_bnb_layers(_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
# convert param to the right dtype
A : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A : int = getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_lowerCAmelCase ):
param.to(_lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A : str = replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , modules_to_not_convert=_lowerCAmelCase )
A : Optional[Any] = get_quantized_model_device_map(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , max_memory=_lowerCAmelCase , no_split_module_classes=_lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A : Tuple = True
A : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCAmelCase , offload_state_dict=_lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_lowerCAmelCase , device_map=_lowerCAmelCase , offload_dir=_lowerCAmelCase )
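# Hedged usage sketch of the entry point above, mirroring the pattern in the
# accelerate quantization docs (model id and weights path are illustrative):
import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")
with init_empty_weights():
    # instantiate on the meta device, as recommended by the warning above
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/opt-350m/weights",  # folder with the checkpoint
    device_map="auto",
)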
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A : Any = {}
A : List[str] = special_dtypes
A : Any = no_split_module_classes
A : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A : Tuple = get_balanced_memory(
_lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=_lowerCAmelCase , **_lowerCAmelCase , )
A : int = max_memory
A : Any = infer_auto_device_map(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# check that we don't have any quantized module on the cpu
A : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A : Optional[int] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
A : Optional[Any] = []
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , ) -> int:
"""simple docstring"""
A : Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
A : int = []
current_key_name.append(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A : Dict = """.""".join(_lowerCAmelCase )
A : Optional[Any] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A : Dict = False
break
if proceed:
                    # Load the bnb module with empty weights and replace the `nn.Linear` module
                    if bnb_quantization_config.load_in_8bit:
                        A : Optional[Any] = bnb.nn.Linear8bitLt(
                            module.in_features , module.out_features , module.bias is not None , has_fp16_weights=_lowerCAmelCase , threshold=bnb_quantization_config.llm_int8_threshold , )
                    elif bnb_quantization_config.load_in_4bit:
                        A : Dict = bnb.nn.Linear4bit(
                            module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A : Any = module.weight.data
if module.bias is not None:
A : Any = module.bias.data
bnb_module.requires_grad_(_lowerCAmelCase )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Dict = True
if len(list(module.children() ) ) > 0:
A , A : Dict = _replace_with_bnb_layers(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
        A : Tuple = deepcopy(_lowerCAmelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
A : Optional[int] = find_tied_parameters(_lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A : Optional[int] = sum(_lowerCAmelCase , [] )
A : Tuple = len(_lowerCAmelCase ) > 0
# Check if it is a base model
A : List[str] = False
if hasattr(_lowerCAmelCase , """base_model_prefix""" ):
A : Optional[Any] = not hasattr(_lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A : str = list(model.named_children() )
A : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
A : int = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
A : Optional[Any] = list(set(_lowerCAmelCase ) ) + list(_lowerCAmelCase )
# remove ".weight" from the keys
A : Union[str, Any] = [""".weight""", """.bias"""]
A : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A : List[str] = name.replace(_lowerCAmelCase , """""" )
filtered_module_names.append(_lowerCAmelCase )
return filtered_module_names
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
for m in model.modules():
if isinstance(_lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , 0 , dtype=_lowerCAmelCase , value=_lowerCAmelCase )
A : Tuple = param_name
A : Union[str, Any] = model
if "." in tensor_name:
A : int = tensor_name.split(""".""" )
for split in splits[:-1]:
A : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
A : Optional[Any] = new_module
A : List[str] = splits[-1]
# offload weights
A : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase , )
else:
offload_weight(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index=_lowerCAmelCase )
offload_weight(_lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , _lowerCAmelCase , index=_lowerCAmelCase )
set_module_tensor_to_device(_lowerCAmelCase , _lowerCAmelCase , """meta""" , dtype=_lowerCAmelCase , value=torch.empty(*param.size() ) )
| 662 | 1 |
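# A minimal, self-contained sketch of the recursive module-replacement pattern used in
# `_replace_with_bnb_layers` above: walk `model.named_children()`, swap every `nn.Linear`
# whose dotted name is not excluded, and recurse into submodules. `QuantLinearStub` is a
# hypothetical stand-in for `bnb.nn.Linear8bitLt` / `bnb.nn.Linear4bit`, used only so the
# sketch runs without bitsandbytes installed.
import torch.nn as nn

class QuantLinearStub(nn.Linear):
    """Placeholder for a real quantized linear layer."""

def replace_linears(model, modules_to_not_convert=(), prefix=""):
    for name, child in model.named_children():
        dotted = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and dotted not in modules_to_not_convert:
            # Build the replacement with the same shape, then swap it in place.
            stub = QuantLinearStub(child.in_features, child.out_features, child.bias is not None)
            setattr(model, name, stub)
        else:
            replace_linears(child, modules_to_not_convert, dotted)
    return model

model = replace_linears(nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2)))
print([type(m).__name__ for m in model])  # ['QuantLinearStub', 'ReLU', 'QuantLinearStub']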
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE_:Optional[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_:str = logging.getLogger()
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
A : Any = argparse.ArgumentParser()
parser.add_argument("""-f""" )
A : int = parser.parse_args()
return args.f
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase="eval" ) -> Optional[Any]:
"""simple docstring"""
A : List[Any] = os.path.join(_lowerCAmelCase , f'''{split}_results.json''' )
if os.path.exists(_lowerCAmelCase ):
with open(_lowerCAmelCase , """r""" ) as f:
return json.load(_lowerCAmelCase )
raise ValueError(f'''can\'t find {path}''' )
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = self.get_auto_remove_tmp_dir()
A : Tuple = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_flax_glue.main()
A : str = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""], 0.75 )
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_auto_remove_tmp_dir()
A : Any = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_clm_flax.main()
A : Optional[Any] = get_results(lowerCamelCase__ )
self.assertLess(result["""eval_perplexity"""], 100 )
@slow
def _lowerCAmelCase ( self ):
A : Dict = self.get_auto_remove_tmp_dir()
A : List[Any] = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_summarization_flax.main()
A : List[Any] = get_results(lowerCamelCase__, split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""], 10 )
self.assertGreaterEqual(result["""test_rouge2"""], 2 )
self.assertGreaterEqual(result["""test_rougeL"""], 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""], 7 )
@slow
def _lowerCAmelCase ( self ):
A : int = self.get_auto_remove_tmp_dir()
A : List[str] = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_mlm_flax.main()
A : Optional[Any] = get_results(lowerCamelCase__ )
self.assertLess(result["""eval_perplexity"""], 42 )
@slow
def _lowerCAmelCase ( self ):
A : str = self.get_auto_remove_tmp_dir()
A : str = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_ta_mlm_flax.main()
A : Union[str, Any] = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""], 0.42 )
@slow
def _lowerCAmelCase ( self ):
        # With so little data, distributed training needs more epochs to reach a score on par with 0/1-GPU runs
A : str = 7 if get_gpu_count() > 1 else 2
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : Tuple = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_flax_ner.main()
A : Optional[int] = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""], 0.75 )
self.assertGreaterEqual(result["""eval_f1"""], 0.3 )
@slow
def _lowerCAmelCase ( self ):
A : str = self.get_auto_remove_tmp_dir()
A : Dict = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(lowerCamelCase__, """argv""", lowerCamelCase__ ):
run_qa.main()
A : List[str] = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_f1"""], 30 )
self.assertGreaterEqual(result["""eval_exact"""], 30 )
| 662 |
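# The tests above all share one pattern: build a fake command line and run a script's
# main() under unittest.mock.patch.object(sys, "argv", ...). A minimal standalone sketch
# follows; `toy_main` is a hypothetical stand-in for the run_*_flax entry points.
import sys
from argparse import ArgumentParser
from unittest.mock import patch

def toy_main():
    parser = ArgumentParser()
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args().learning_rate  # parse_args() reads sys.argv[1:]

testargs = "toy_script.py --learning_rate 1e-4".split()
with patch.object(sys, "argv", testargs):
    assert toy_main() == 1e-4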
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
A : Tuple = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
A : Dict = parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
A : Tuple = parser.parse_args()
if not hasattr(_lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
A : Any = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 662 | 1 |
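# A minimal sketch of the subcommand-registration pattern the CLI above relies on: each
# command registers its own subparser and installs a factory via `set_defaults(func=...)`,
# so main() only has to call `args.func(args)` and `run()`. `EchoCommand` is illustrative,
# not part of the real transformers-cli.
from argparse import ArgumentParser

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo", help="print a message")
        parser.add_argument("message")
        parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
EchoCommand.register_subcommand(subparsers)
args = parser.parse_args(["echo", "hello"])
args.func(args).run()  # prints "hello"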
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ):
A : List[Any] = feature_size
A : Optional[Any] = sampling_rate
A : int = padding_value
A : Union[str, Any] = kwargs.pop("""padding_side""", """right""" )
A : Union[str, Any] = kwargs.pop("""return_attention_mask""", lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = True, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__, (list, tuple) ) and isinstance(processed_features[0], (dict, BatchFeature) ):
A : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
A : Optional[int] = processed_features[self.model_input_names[0]]
A : Tuple = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
A : int = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : Optional[Any] = required_input[0]
if isinstance(lowerCamelCase__, (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
A : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
A : Tuple = """tf"""
elif is_torch_tensor(lowerCamelCase__ ):
A : Dict = """pt"""
elif isinstance(lowerCamelCase__, (int, float, list, tuple, np.ndarray) ):
A : str = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(lowerCamelCase__ )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0], (int, float) ):
A : Optional[Any] = to_numpy(lowerCamelCase__ )
else:
A : Optional[int] = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase__, max_length=lowerCamelCase__ )
A : List[str] = processed_features[self.model_input_names[0]]
A : str = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
A : int = []
for i in range(lowerCamelCase__ ):
A : Optional[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
A : Optional[Any] = self._truncate(
lowerCamelCase__, max_length=lowerCamelCase__, pad_to_multiple_of=lowerCamelCase__, truncation=lowerCamelCase__, )
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : int = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : List[str] = PaddingStrategy.MAX_LENGTH
A : Optional[int] = {}
for i in range(lowerCamelCase__ ):
# padding
A : Optional[int] = self._pad(
truncated_inputs[i], max_length=lowerCamelCase__, padding_strategy=lowerCamelCase__, pad_to_multiple_of=lowerCamelCase__, return_attention_mask=lowerCamelCase__, )
for key, value in outputs.items():
if key not in batch_outputs:
A : List[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    A : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__, tensor_type=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
A : str = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : Any = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : Dict = np.ones(len(lowerCamelCase__ ), dtype=np.intaa )
if needs_to_be_padded:
A : List[Any] = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
A : List[str] = np.pad(
processed_features["""attention_mask"""], (0, difference) )
A : Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Optional[int] = np.pad(
lowerCamelCase__, lowerCamelCase__, """constant""", constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : Union[str, Any] = np.pad(
processed_features["""attention_mask"""], (difference, 0) )
A : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Dict = np.pad(
lowerCamelCase__, lowerCamelCase__, """constant""", constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
A : Union[str, Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : Optional[Any] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
A : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Any = processed_features["""attention_mask"""][:max_length]
return processed_features
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__=None ):
# Get padding strategy
if padding is not False:
if padding is True:
A : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Optional[Any] = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : List[Any] = padding
else:
A : Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 662 |
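# A minimal numpy sketch of the core padding logic implemented above: pad each 1-D
# feature sequence on the right up to the longest length in the batch (optionally
# rounded up to a multiple), and build the matching attention mask. This mirrors only
# the PaddingStrategy.LONGEST + padding_side="right" path of `_pad`.
import numpy as np

def pad_batch(sequences, padding_value=0.0, pad_to_multiple_of=None):
    max_length = max(len(seq) for seq in sequences)
    if pad_to_multiple_of and max_length % pad_to_multiple_of != 0:
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    padded, masks = [], []
    for seq in sequences:
        difference = max_length - len(seq)
        padded.append(np.pad(seq, (0, difference), constant_values=padding_value))
        masks.append(np.pad(np.ones(len(seq), dtype=np.int32), (0, difference)))
    return np.stack(padded), np.stack(masks)

values, mask = pad_batch([np.array([1.0, 2.0, 3.0]), np.array([4.0])], pad_to_multiple_of=4)
print(values.shape, mask.tolist())  # (2, 4) [[1, 1, 1, 0], [1, 0, 0, 0]]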
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:int = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 | 1 |
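# A simplified sketch of the lazy-import idea behind `_LazyModule` above: a module-level
# `__getattr__` (PEP 562) defers the real import until a symbol is first accessed. This
# is an illustrative stand-in, not the actual `_LazyModule` implementation.
import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            # Import the backing module only now, on first attribute access.
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(__getattr__("dumps")({"a": 1}))  # '{"a": 1}'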
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=7, lowerCamelCase__=3, lowerCamelCase__=30, lowerCamelCase__=400, lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=[0.5, 0.5, 0.5], lowerCamelCase__=True, lowerCamelCase__=1 / 255, lowerCamelCase__=True, ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Tuple = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
A : Union[str, Any] = parent
A : Optional[int] = batch_size
A : Union[str, Any] = num_channels
A : Tuple = min_resolution
A : str = max_resolution
A : Dict = do_resize
A : Optional[Any] = size
A : Optional[int] = do_normalize
A : Any = image_mean
A : List[str] = image_std
A : Any = do_rescale
A : List[Any] = rescale_factor
A : Tuple = do_pad
def _lowerCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False ):
if not batched:
A : str = image_inputs[0]
if isinstance(lowerCamelCase__, Image.Image ):
A , A : Optional[Any] = image.size
else:
A , A : int = image.shape[1], image.shape[2]
if w < h:
A : str = int(self.size["""shortest_edge"""] * h / w )
A : List[str] = self.size["""shortest_edge"""]
elif w > h:
A : Any = self.size["""shortest_edge"""]
A : List[str] = int(self.size["""shortest_edge"""] * w / h )
else:
A : Any = self.size["""shortest_edge"""]
A : Optional[Any] = self.size["""shortest_edge"""]
else:
A : Any = []
for image in image_inputs:
A , A : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : List[Any] = max(lowerCamelCase__, key=lambda lowerCamelCase__ : item[0] )[0]
A : Any = max(lowerCamelCase__, key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self ):
A : Optional[Any] = ConditionalDetrImageProcessingTester(self )
@property
def _lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self ):
A : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__, """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__, """size""" ) )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad, lowerCamelCase__ )
A : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=lowerCamelCase__ )
self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A , A : Any = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
A : int = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, np.ndarray )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : Any = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : int = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
A , A : Any = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def _lowerCAmelCase ( self ):
# Initialize image_processing
A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase__, torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__, torch.Tensor )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
A , A : Any = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
A : int = image_processing(lowerCamelCase__, return_tensors="""pt""" ).pixel_values
A , A : List[str] = self.image_processor_tester.get_expected_values(lowerCamelCase__, batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def _lowerCAmelCase ( self ):
# prepare image and target
A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""" ) as f:
A : Optional[int] = json.loads(f.read() )
A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
A : int = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
A : int = image_processing(images=lowerCamelCase__, annotations=lowerCamelCase__, return_tensors="""pt""" )
# verify pixel values
A : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape, lowerCamelCase__ )
A : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], lowerCamelCase__, atol=1e-4 ) )
# verify area
A : Optional[int] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], lowerCamelCase__ ) )
# verify boxes
A : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, lowerCamelCase__ )
A : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], lowerCamelCase__, atol=1e-3 ) )
# verify image_id
A : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], lowerCamelCase__ ) )
# verify is_crowd
A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], lowerCamelCase__ ) )
# verify class_labels
A : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], lowerCamelCase__ ) )
# verify orig_size
A : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], lowerCamelCase__ ) )
# verify size
A : Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], lowerCamelCase__ ) )
@slow
def _lowerCAmelCase ( self ):
# prepare image, target and masks_path
A : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""" ) as f:
A : List[str] = json.loads(f.read() )
A : Any = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
A : Optional[int] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A : List[Any] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
A : int = image_processing(images=lowerCamelCase__, annotations=lowerCamelCase__, masks_path=lowerCamelCase__, return_tensors="""pt""" )
# verify pixel values
A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape, lowerCamelCase__ )
A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], lowerCamelCase__, atol=1e-4 ) )
# verify area
A : Tuple = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], lowerCamelCase__ ) )
# verify boxes
A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, lowerCamelCase__ )
A : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], lowerCamelCase__, atol=1e-3 ) )
# verify image_id
A : Optional[int] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], lowerCamelCase__ ) )
# verify is_crowd
A : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], lowerCamelCase__ ) )
# verify class_labels
A : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], lowerCamelCase__ ) )
# verify masks
A : Optional[Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), lowerCamelCase__ )
# verify orig_size
A : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], lowerCamelCase__ ) )
# verify size
A : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], lowerCamelCase__ ) )
| 662 |
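# The resize expectations in the tests above reduce to one rule: scale the image so its
# shorter side equals size["shortest_edge"] while preserving the aspect ratio. A
# standalone sketch of `get_expected_values` for the unbatched case (the longest_edge
# cap is deliberately not exercised by the tester, as its comment notes):
def expected_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(expected_size(400, 30))  # (240, 18): portrait, width is the short side
print(expected_size(30, 400))  # (18, 240): landscape, height is the short side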
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Optional[int] = int(_lowerCAmelCase )
# Initialize Result
A : int = []
# Traverse through all denomination
for denomination in reversed(_lowerCAmelCase ):
# Find denominations
while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ):
total_value -= int(_lowerCAmelCase )
answer.append(_lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[Any] = []
SCREAMING_SNAKE_CASE_:Dict = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
SCREAMING_SNAKE_CASE_:Optional[Any] = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
SCREAMING_SNAKE_CASE_:str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 662 | 1 |
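# A quick worked trace of the greedy rule in `find_minimum_change` above, using the
# driver's default Indian denominations. Greedy is only guaranteed optimal for canonical
# coin systems like this one; for arbitrary denominations it can return a non-minimal
# answer (e.g. denominations [1, 3, 4] and value 6 give [4, 1, 1] instead of [3, 3]).
total, answer = 987, []
for denomination in reversed([1, 2, 5, 10, 20, 50, 100, 500, 2_000]):
    while total >= denomination:
        total -= denomination
        answer.append(denomination)
print(answer)  # [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]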
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
SCREAMING_SNAKE_CASE_:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
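# A minimal standalone sketch of the optional-dependency guard pattern used above:
# probe whether a backend is importable and only register its symbols when it is.
# `importlib.util.find_spec` stands in here for helpers like `is_torch_available()`.
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def backend_available(package):
    return importlib.util.find_spec(package) is not None

try:
    if not backend_available("torch"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("torch missing: torch-backed symbols stay unregistered")
else:
    print("torch found: torch-backed symbols would be registered here")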
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB in total.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 662 | 1 |
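# A hypothetical follow-up to the script above: once uploaded, the tiny checkpoint can
# be loaded like any FSMT model (the repo id "stas/tiny-wmt19-en-ru" comes from the
# script's own comments; loading requires network access).
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tokenizer(["Making tiny model"], return_tensors="pt")
print(model(**batch).logits.shape)  # tiny dims, e.g. (1, seq_len, vocab_size)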
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
def count_of_possible_combinations(_lowerCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
_lowerCAmelCase , _lowerCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A : int = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCAmelCase )
for item in array )
A : List[str] = answer
return answer
A : List[str] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : Union[str, Any] = [0] * (target + 1)
A : Union[str, Any] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCAmelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_:Optional[int] = 3
SCREAMING_SNAKE_CASE_:Union[str, Any] = 5
SCREAMING_SNAKE_CASE_:Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 662 |
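# A standalone trace of the bottom-up recurrence above, dp[i] = sum(dp[i - a] for a in
# array if i >= a) with dp[0] = 1. Note it counts ordered selections (compositions), so
# [1, 2, 2], [2, 1, 2] and [2, 2, 1] are distinct; for array=[1, 2, 5], target=5 this
# gives 9, matching the driver's inputs.
array, target = [1, 2, 5], 5
dp = [1] + [0] * target
for i in range(1, target + 1):
    dp[i] = sum(dp[i - a] for a in array if i - a >= 0)
print(dp)  # [1, 1, 2, 3, 5, 9]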
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
    A : Optional[Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(_lowerCAmelCase ).parent ) ,
        checkpoint_file=Path(_lowerCAmelCase ).name ,
        _name="""xmod_base""" ,
        arch="""xmod_base""" ,
        task="""multilingual_masked_lm""" ,
        data_name_or_path=str(_lowerCAmelCase ) ,
        bpe="""sentencepiece""" ,
        sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) ,
        src_dict=str(data_dir / """dict.txt""" ) ,
    )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
    A : Optional[int] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=xmod.cfg.model.encoder_embed_dim ,
        num_hidden_layers=xmod.cfg.model.encoder_layers ,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads ,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=514 ,
        type_vocab_size=1 ,
        layer_norm_eps=1e-5 ,
        pre_norm=xmod.cfg.model.encoder_normalize_before ,
        adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) ,
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter ,
        languages=xmod.cfg.model.languages ,
    )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 662 | 1 |
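# A minimal standalone sketch of the numerical-equivalence check used at the end of the
# conversion above: report the max absolute difference between two forward passes and
# gate success on torch.allclose with a small tolerance. The tensors here are toy data.
import torch

our_output = torch.tensor([1.0, 2.0, 3.0])
their_output = torch.tensor([1.0, 2.0, 3.0 + 5e-4])
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # ~5e-4
print(torch.allclose(our_output, their_output, atol=1e-3))  # True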
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
SCREAMING_SNAKE_CASE_:Dict = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
SCREAMING_SNAKE_CASE_:str = {"""facebook/blenderbot-3B""": 128}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[Any] = ["input_ids", "attention_mask"]
__lowerCamelCase : str = BlenderbotTokenizer
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="replace", lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__="</s>", lowerCamelCase__="<s>", lowerCamelCase__="<unk>", lowerCamelCase__="<pad>", lowerCamelCase__="<mask>", lowerCamelCase__=False, lowerCamelCase__=True, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, lowerCamelCase__, tokenizer_file=lowerCamelCase__, errors=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, sep_token=lowerCamelCase__, cls_token=lowerCamelCase__, unk_token=lowerCamelCase__, pad_token=lowerCamelCase__, mask_token=lowerCamelCase__, add_prefix_space=lowerCamelCase__, trim_offsets=lowerCamelCase__, **lowerCamelCase__, )
A : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", lowerCamelCase__ ) != add_prefix_space:
A : List[Any] = getattr(lowerCamelCase__, pre_tok_state.pop("""type""" ) )
A : Optional[int] = add_prefix_space
A : List[Any] = pre_tok_class(**lowerCamelCase__ )
A : Any = add_prefix_space
A : Optional[Any] = """post_processor"""
A : Dict = getattr(self.backend_tokenizer, lowerCamelCase__, lowerCamelCase__ )
if tokenizer_component_instance:
A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
A : Dict = tuple(state["""sep"""] )
if "cls" in state:
A : Tuple = tuple(state["""cls"""] )
A : Any = False
if state.get("""add_prefix_space""", lowerCamelCase__ ) != add_prefix_space:
A : List[Any] = add_prefix_space
A : Optional[Any] = True
if state.get("""trim_offsets""", lowerCamelCase__ ) != trim_offsets:
A : Optional[int] = trim_offsets
A : Optional[int] = True
if changes_to_apply:
A : List[str] = getattr(lowerCamelCase__, state.pop("""type""" ) )
A : Optional[int] = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer, lowerCamelCase__, lowerCamelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[Any] = AddedToken(lowerCamelCase__, lstrip=lowerCamelCase__, rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__, lowerCamelCase__ ) else value
A : Optional[int] = value
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
A : Any = kwargs.get("""is_split_into_words""", lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
A : Dict = kwargs.get("""is_split_into_words""", lowerCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Optional[Any] = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[str] = [self.sep_token_id]
A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix a space, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses already contain the leading space.
inputs.append(lowerCamelCase__ )
A : List[Any] = """ """.join(lowerCamelCase__ )
A : Optional[int] = self.encode(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.model_max_length:
A : int = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
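# Editor's hedged usage sketch (illustrative addition, not part of the original
# dataset row; assumes the class above is the BlenderbotTokenizerFast shipped
# with `transformers` and that the facebook/blenderbot-3B files are reachable):
from transformers import BlenderbotTokenizerFast
tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello, how are you?").input_ids
assert ids[-1] == tok.eos_token_id  # a single EOS is appended, as in the method above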
| 662 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = tempfile.mkdtemp()
A : List[str] = BlipImageProcessor()
A : Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
A : str = BlipProcessor(lowerCamelCase__, lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).tokenizer
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ).image_processor
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : int = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Any = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase__, padding_value=1.0 )
A : Dict = BlipProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : str = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : int = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Optional[Any] = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = """lower newer"""
A : List[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__, return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : Union[str, Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Dict = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[int] = processor.batch_decode(lowerCamelCase__ )
A : Dict = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_image_processor()
A : int = self.get_tokenizer()
A : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[int] = """lower newer"""
A : List[str] = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 662 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
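# Editor's note: the slice above gathers a fixed 2x2x2 corner of the last
# batch element (8 values in total), which is what the 8-element expected
# slices in the parameterized cases refer to.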
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
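# Editor's note: the slice above gathers a fixed 2x2x2 corner of the last
# batch element (8 values in total), which is what the 8-element expected
# slices in the parameterized cases refer to.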
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
SCREAMING_SNAKE_CASE_:Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def __UpperCamelCase ( _lowerCAmelCase ) -> bytes:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : int = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowerCAmelCase )
A : str = """""".join(bin(_lowerCAmelCase )[2:].zfill(8 ) for byte in data )
A : Optional[Any] = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
A : Union[str, Any] = B"""=""" * ((6 - len(_lowerCAmelCase ) % 6) // 2)
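# Editor's note: each "=" stands for two missing bits; 8 * len(data) is
# congruent to 2 or 4 (mod 6) whenever padding is needed, so either two or
# one "=" characters are appended, as in standard Base64.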
# Pad binary_stream with zero bits (the conventional choice) so that its
# length becomes a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
A : Union[str, Any] = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowerCAmelCase ) -> bytes:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
A : int = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
A : List[str] = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A : List[Any] = encoded_data[:-padding]
A : Tuple = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A : Tuple = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
A : Dict = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
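# Editor's hedged round-trip sketch (illustrative; the dump collapses both
# function names, so `base64_encode` / `base64_decode` are hypothetical
# bindings for the encoder and decoder defined above):
#
# >>> base64_encode(b"Hello")
# b'SGVsbG8='
# >>> base64_decode(b"SGVsbG8=")
# b'Hello'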
| 662 |
from typing import Any
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> bool:
"""simple docstring"""
return np.array_equal(_lowerCAmelCase , matrix.conjugate().T )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Any = v.conjugate().T
A : List[Any] = v_star.dot(_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , np.ndarray )
return (v_star_dot.dot(_lowerCAmelCase )) / (v_star.dot(_lowerCAmelCase ))
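# Editor's note: the value returned above is the Rayleigh quotient
# R(M, v) = (v* M v) / (v* v), with v* the conjugate transpose of v; for a
# Hermitian M it is always real and bounded by the extreme eigenvalues of M.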
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
A : Any = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
A : str = np.array([[1], [2], [3]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) )
A : Tuple = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_lowerCAmelCase ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(_lowerCAmelCase , _lowerCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 662 | 1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_:Union[str, Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
A : Dict = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=_lowerCAmelCase , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=_lowerCAmelCase , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=_lowerCAmelCase , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=_lowerCAmelCase , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=_lowerCAmelCase , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=_lowerCAmelCase , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=_lowerCAmelCase , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
A : Dict = parser.parse_args()
return args
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
def fn(_lowerCAmelCase ):
return tokenizer(examples["""text"""] )
return fn
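# Editor's note: returning a closure binds the tokenizer once, so the mapping
# function handed to `Dataset.map` below needs no extra arguments.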
def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
A : List[str] = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
A : Optional[int] = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
A : Optional[Any] = tf.train.Features(feature=_lowerCAmelCase )
A : Dict = tf.train.Example(features=_lowerCAmelCase )
A : Tuple = example.SerializeToString()
records.append(_lowerCAmelCase )
return records
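# Editor's note: each serialized `tf.train.Example` above stores the two
# int64 feature lists under the keys "input_ids" and "attention_mask",
# mirroring the tokenizer's output columns.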
def __UpperCamelCase ( _lowerCAmelCase ) -> Dict:
"""simple docstring"""
A : Optional[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
A : List[Any] = min(len(_lowerCAmelCase ) , args.limit )
A : List[Any] = dataset.select(range(_lowerCAmelCase ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
A : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
A : str = os.path.join(args.output_dir , args.split )
if not os.path.exists(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
else:
A : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
A : int = tokenize_function(_lowerCAmelCase )
A : int = dataset.map(_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_lowerCAmelCase ):
# Concatenate all texts.
A : Union[str, Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
A : Dict = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
A : Optional[int] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
A : Tuple = {
k: [t[i : i + args.max_length] for i in range(0 , _lowerCAmelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
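# Editor's worked example: with args.max_length=512 and a concatenated batch
# of 1,100 tokens, total_length becomes (1100 // 512) * 512 = 1024, so two
# 512-token samples are produced and the trailing 76 tokens are dropped.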
A : List[str] = dataset_tokenized.map(_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=1000 , num_proc=4 )
A : List[Any] = 0
A : Optional[Any] = 0
for shard in range(0 , len(_lowerCAmelCase ) , args.shard_size ):
A : Tuple = grouped_dataset[shard : shard + args.shard_size]
A : Optional[Any] = len(dataset_snapshot["""input_ids"""] )
A : List[Any] = os.path.join(_lowerCAmelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
A : Any = get_serialized_examples(_lowerCAmelCase )
with tf.io.TFRecordWriter(_lowerCAmelCase ) as out_file:
for i in range(len(_lowerCAmelCase ) ):
A : Optional[Any] = serialized_examples[i]
out_file.write(_lowerCAmelCase )
print("""Wrote file {} containing {} records""".format(_lowerCAmelCase , _lowerCAmelCase ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Any = parse_args()
main(args)
| 662 |
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( _lowerCAmelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
A , A : int = np.shape(_lowerCAmelCase )
if rows != columns:
A : Union[str, Any] = (
"""'table' has to be of square shaped array but got a """
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCAmelCase )
A : Union[str, Any] = np.zeros((rows, columns) )
A : Dict = np.zeros((rows, columns) )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
A : Any = (table[i][j] - total) / upper[j][j]
A : Union[str, Any] = 1
for j in range(_lowerCAmelCase , _lowerCAmelCase ):
A : Any = sum(lower[i][k] * upper[k][j] for k in range(_lowerCAmelCase ) )
A : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
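# Editor's hedged usage sketch (illustrative; `lu_decomposition` is a
# hypothetical binding for the Doolittle factorization defined above):
#
# >>> lower, upper = lu_decomposition(np.array([[4.0, 3.0], [6.0, 3.0]]))
# >>> lower
# array([[1. , 0. ],
#        [1.5, 1. ]])
# >>> upper
# array([[ 4. ,  3. ],
#        [ 0. , -1.5]])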
| 662 | 1 |