code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""Odd-even transposition sort run in parallel: one process per list element.

Neighbouring processes exchange values through multiprocessing Pipes; after
len(arr) compare-exchange phases every process holds the value belonging at
its position.
"""
from multiprocessing import Lock, Pipe, Process

# Lock used to ensure that two processes do not access a pipe at the same time.
# NOTE(review): shared via fork inheritance; with the "spawn" start method each
# child would get its own lock — same limitation as the original code.
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, phases=10):
    """Worker owning one list element.

    position    -- index of this element in the original list
    value       -- the element this process is responsible for
    l_send/r_send -- pipes used to send our value to the left/right neighbour
                     (None when there is no such neighbour)
    lr_cv/rr_cv -- pipes used to receive the left/right neighbour's value
    result_pipe -- pipe used to report the final value back to the parent
    phases      -- number of compare-exchange phases; n phases suffice to sort
                   n elements (default 10 preserves the original hard-coded
                   behaviour)
    """
    global process_lock

    # We perform `phases` swaps since after n phases we know we are sorted.
    # We *could* stop early if already sorted, but detecting that takes as
    # long as finishing the remaining phases.
    for i in range(phases):
        if (i + position) % 2 == 0 and r_send is not None:
            # Even phase for this position: pair with the right neighbour.
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # We are the left element of the pair, so keep the smaller value.
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # Odd phase for this position: pair with the left neighbour.
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # We are the right element of the pair, so keep the larger value.
            value = max(value, temp)

    # After all phases are performed, send the final value back to the parent.
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` in place (and return it) using one process per element."""
    # Fewer than two elements are already sorted; this also avoids the
    # original's creation of two workers for the same single element.
    if len(arr) < 2:
        return arr

    process_array_ = []
    # One result pipe per element, used to collect the sorted values.
    result_pipe = [Pipe() for _ in arr]

    # The first and last processes have only one neighbour, so they are
    # created outside of the loop.
    temp_rs = Pipe()  # pipe this worker sends right on
    temp_rr = Pipe()  # pipe this worker receives from the right on
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    # The pipes are "crossed": the next worker sends left on the pipe we
    # receive right on, and receives left on the pipe we send right on.
    temp_ls = temp_rr
    temp_lr = temp_rs

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], len(arr)),
            )
        )
        temp_ls = temp_rr
        temp_lr = temp_rs

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )

    # Start all the workers, then gather their final values in order.
    for p in process_array_:
        p.start()
    for p in range(len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demonstrate the sort on a reversed list of 1..10."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
60
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( _lowercase ): A__ = 'sew-d' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Optional[int] = feat_extract_norm lowerCAmelCase__ : str = feat_extract_activation lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : int = list(__UpperCAmelCase ) 
lowerCAmelCase__ : Any = list(__UpperCAmelCase ) lowerCAmelCase__ : int = conv_bias lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups lowerCAmelCase__ : int = len(self.conv_dim ) lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = squeeze_factor lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Any = position_buckets lowerCAmelCase__ : Optional[int] = share_att_key lowerCAmelCase__ : Tuple = relative_attention lowerCAmelCase__ : Optional[int] = norm_rel_ebd lowerCAmelCase__ : Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_dropout lowerCAmelCase__ : Union[str, Any] = attention_dropout lowerCAmelCase__ : str = activation_dropout lowerCAmelCase__ : List[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = feature_layer_norm_eps lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : Tuple = apply_spec_augment lowerCAmelCase__ : List[str] = mask_time_prob lowerCAmelCase__ : int = mask_time_length lowerCAmelCase__ : int = mask_time_min_masks 
lowerCAmelCase__ : Optional[int] = mask_feature_prob lowerCAmelCase__ : int = mask_feature_length lowerCAmelCase__ : int = mask_feature_min_masks # ctc loss lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction lowerCAmelCase__ : Any = ctc_zero_infinity # sequence classification lowerCAmelCase__ : Tuple = use_weighted_layer_sum lowerCAmelCase__ : Dict = classifier_proj_size @property def __magic_name__( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
678
0
def _A ( lowerCAmelCase_ : int ): """simple docstring""" lowerCAmelCase__ = 1 for i in range(1 , num + 1 ): fact *= i return fact def _A ( lowerCAmelCase_ : int ): """simple docstring""" lowerCAmelCase__ = 0 while number > 0: lowerCAmelCase__ = number % 10 sum_of_digits += last_digit lowerCAmelCase__ = number // 10 # Removing the last_digit from the given number return sum_of_digits def _A ( lowerCAmelCase_ : int = 100 ): """simple docstring""" lowerCAmelCase__ = factorial(lowerCAmelCase_ ) lowerCAmelCase__ = split_and_add(lowerCAmelCase_ ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
61
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int 
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] 
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> 
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
import copy
import re


class TrialShortNamer:
    """Builds short, reversible names for hyper-parameter trials.

    Each parameter name is abbreviated to the shortest unambiguous prefix of
    each of its '_'-separated words; a trial is then encoded as
    ``<PREFIX>_<shortkey><value>_...`` (default values are omitted) and can be
    decoded back into a parameter dict with :meth:`parse_repr`.
    """

    PREFIX = "hp"  # prepended to every generated trial name
    DEFAULTS = {}  # param name -> default value; defaults are left out of names
    NAMING_INFO = None  # lazily-built lookup tables, see build_naming_info()

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Set the naming prefix and default parameter values, then (re)build tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and register) a unique short form for a single word."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        # Try the shortest prefix that is not already taken by another word.
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            short_word = prefix
            break
        if short_word is None:
            # Paranoid fallback: every prefix (including the full word) is
            # taken, so append '#' plus a suffix built from the digits 'A'-'J'.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            counter = 0
            while True:
                sword = word + "#" + int_to_alphabetic(counter)
                if sword in info["reverse_short_word"]:
                    # BUGFIX: the original never advanced the counter here,
                    # so the first collision spun this loop forever.
                    counter += 1
                    continue
                short_word = sword
                break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return (and register) a unique short key for a full parameter name."""
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a
        # collision we have to fallback to a separated short name.
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        # Both candidates collide: fall back to the full parameter name.
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register `param_name` and its short key in the lookup tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the short-name lookup tables once per class (idempotent)."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode a dict of parameter values as a short trial name."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name.
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            # Numbers are appended directly; other values use a '-' separator.
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by :meth:`shortname` back into a param dict.

        Note: separatorless numeric values always come back as floats.
        """
        repr = repr[len(cls.PREFIX) + 1 :]
        values = [] if repr == "" else repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                # Split "<letters><digits>" into key and float value.
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            # Map the short key back to the full parameter name.
            parameters[cls.NAMING_INFO["reverse_short_param"][p_k]] = p_v
        # Fill in defaults for every parameter the name omitted.
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
62
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _lowerCAmelCase ( _lowercase ): A__ = 'donut-swin' A__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = image_size lowerCAmelCase__ : List[str] = patch_size lowerCAmelCase__ : int = num_channels lowerCAmelCase__ : Optional[Any] = embed_dim lowerCAmelCase__ : int = depths lowerCAmelCase__ : Dict = len(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = num_heads lowerCAmelCase__ : Dict = window_size lowerCAmelCase__ : str = mlp_ratio lowerCAmelCase__ : Optional[int] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : List[str] = drop_path_rate lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : List[str] = use_absolute_embeddings lowerCAmelCase__ : Dict = layer_norm_eps lowerCAmelCase__ : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
678
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a : str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = ["GPTSw3Tokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
63
lowerCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' ) lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' ) lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : Tuple = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : List[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized] lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized] lowerCAmelCase__ : int = 1 if from_exponent > to_exponent: lowerCAmelCase__ : List[str] = from_exponent - to_exponent else: lowerCAmelCase__ : Dict = -(to_exponent - from_exponent) return value * pow(10 , UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
678
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ): __a = TextToVideoSDPipeline __a = TEXT_TO_IMAGE_PARAMS __a = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __a = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def UpperCamelCase_ ( self ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[str]= UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) SCREAMING_SNAKE_CASE__: Optional[int]= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: int= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: str= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) SCREAMING_SNAKE_CASE__: Dict= CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__: Tuple= { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Dict: if str(lowerCAmelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__: Any= torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Any= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[Any]= { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def UpperCamelCase_ ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__: str= '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components() SCREAMING_SNAKE_CASE__: List[Any]= TextToVideoSDPipeline(**lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Any= '''np''' SCREAMING_SNAKE_CASE__: Optional[int]= sd_pipe(**lowerCAmelCase ).frames SCREAMING_SNAKE_CASE__: Union[str, Any]= frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) SCREAMING_SNAKE_CASE__: Any= np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase_ ( self ) -> int: 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCamelCase_ ( self ) -> List[str]: pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCamelCase_ ( self ) -> Optional[int]: pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def UpperCamelCase_ ( self ) -> Dict: pass def UpperCamelCase_ ( self ) -> Dict: return super().test_progress_bar() @slow @skip_mps class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Dict: SCREAMING_SNAKE_CASE__: Optional[int]= load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) SCREAMING_SNAKE_CASE__: Any= TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE__: int= pipe.to('''cuda''' ) SCREAMING_SNAKE_CASE__: List[str]= '''Spiderman is surfing''' SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__: Optional[int]= pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type='''pt''' ).frames SCREAMING_SNAKE_CASE__: List[str]= video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def UpperCamelCase_ ( self ) -> List[str]: SCREAMING_SNAKE_CASE__: List[str]= load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) SCREAMING_SNAKE_CASE__: List[Any]= TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) SCREAMING_SNAKE_CASE__: Any= pipe.to('''cuda''' ) SCREAMING_SNAKE_CASE__: Optional[Any]= '''Spiderman is surfing''' SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__: Dict= pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type='''pt''' ).frames SCREAMING_SNAKE_CASE__: Dict= video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
64
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': 
'''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': 
'''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
678
0
"""simple docstring""" from __future__ import annotations def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
65
# Minimum cut via Ford-Fulkerson / Edmonds-Karp maximum flow.
# Classic CLRS example network: 6 nodes, source 0, sink 5.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
# Backward-compatible alias for the previous (obfuscated) module-level name.
lowerCAmelCase_ = test_graph


def bfs(graph, s, t, parent):
    """
    Breadth-first search in the residual graph.

    Returns True if sink ``t`` is reachable from source ``s``; fills
    ``parent`` with the predecessor of each visited node so the augmenting
    path can be reconstructed by the caller.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Only traverse edges with remaining residual capacity.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """
    Run Edmonds-Karp to saturation and return the saturated edges.

    NOTE: ``graph`` is mutated in place (it becomes the residual graph).
    The returned list contains every edge (i, j) whose residual capacity
    dropped to 0 while its original capacity was positive.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (deep-ish copy).
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (forward and backward).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Return a copy of ``img`` with its brightness shifted by ``level``.

    ``level`` must be in [-255.0, 255.0]; -255 yields black, 255 white.
    Raises ValueError for out-of-range levels.
    """
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    def brightness(c: int) -> float:
        # Shift each channel value by `level` (128 terms cancel; kept for
        # clarity of the midpoint-based formulation).
        return 128 + level + (c - 128)

    # Image.point applies the mapping function to every pixel channel.
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
66
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): 
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) 
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a 
test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): 
lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """
    Constructs a CLIP image processor.

    Pipeline (each step individually switchable): resize the shortest edge,
    center-crop, rescale pixel values (default 1/255), normalize with the
    OpenAI CLIP mean/std, and convert inputs to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Resize targets the shortest edge; the crop is square by default.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess one image or a batch. Every argument defaults to the value
        configured at construction time; passing it here overrides per call.
        Returns a ``BatchFeature`` with key ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
67
def base16_encode(data: bytes) -> str:
    """
    Encode ``data`` as an uppercase Base16 (hex) string per RFC 3548 sec. 6.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Two uppercase hex digits per byte.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase Base16 string back into bytes.

    Raises ValueError when the input length is odd or contains characters
    outside the uppercase hexadecimal alphabet.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
678
0
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """
    Return a copy of ``img`` with its contrast adjusted by ``level``.

    ``level`` ranges over [-255, 255]; positive values increase contrast,
    negative values flatten it toward uniform gray.
    """
    # Standard contrast-correction factor derived from `level`.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental Transformation/Operation applied to one channel value."""
        # Stretch the channel value away from the 128 midpoint.
        return int(128 + factor * (c - 128))

    # Image.point applies the mapping function to every pixel channel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
68
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class _lowerCAmelCase(_lowercase):
    # Test-suite for DPMSolverSDEScheduler: config smoke tests plus full
    # denoising loops with per-backend pinned sums/means.
    # NOTE(review): mechanical renaming damage throughout — both class
    # attributes are bound to the same name ``A__`` (second overwrites the
    # first), every test method is named ``__magic_name__`` (only the last
    # survives), and locals are assigned to ``lowerCAmelCase__`` while the
    # bodies read the original names (``config``, ``scheduler``, ``model``,
    # ``sample``, ``output``, ``result_sum``, ``result_mean``). Verify
    # against the upstream diffusers test file before fixing.
    A__ = (DPMSolverSDEScheduler,)
    A__ = 10

    def __magic_name__(self, **__UpperCAmelCase):
        # Base scheduler config; keyword overrides are merged on top.
        lowerCAmelCase__: Dict = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        # NOTE(review): ``config`` is never bound here — presumably the dict
        # above was meant to be named ``config``.
        config.update(**__UpperCAmelCase)
        return config

    def __magic_name__(self):
        # Smoke-test several train-timestep counts.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase)

    def __magic_name__(self):
        # Smoke-test several (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=__UpperCAmelCase, beta_end=__UpperCAmelCase)

    def __magic_name__(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__UpperCAmelCase)

    def __magic_name__(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase)

    def __magic_name__(self):
        # Full denoising loop with the default (epsilon) config.
        lowerCAmelCase__: List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__: str = self.get_scheduler_config()
        lowerCAmelCase__: Optional[Any] = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        lowerCAmelCase__: Union[str, Any] = self.dummy_model()
        lowerCAmelCase__: Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__: Optional[Any] = sample.to(__UpperCAmelCase)
        for i, t in enumerate(scheduler.timesteps):
            lowerCAmelCase__: Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[str] = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[str] = output.prev_sample
        lowerCAmelCase__: Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        # Expected values are pinned per backend (kernel/RNG differences).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def __magic_name__(self):
        # Same full loop but with prediction_type="v_prediction".
        lowerCAmelCase__: Dict = self.scheduler_classes[0]
        lowerCAmelCase__: Tuple = self.get_scheduler_config(prediction_type='''v_prediction''')
        lowerCAmelCase__: Tuple = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        lowerCAmelCase__: Optional[Any] = self.dummy_model()
        lowerCAmelCase__: Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__: Tuple = sample.to(__UpperCAmelCase)
        for i, t in enumerate(scheduler.timesteps):
            lowerCAmelCase__: List[str] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[Any] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = output.prev_sample
        lowerCAmelCase__: Any = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def __magic_name__(self):
        # Full loop with timesteps placed on the target device up front.
        lowerCAmelCase__: Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__: Tuple = self.get_scheduler_config()
        lowerCAmelCase__: Tuple = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps, device=__UpperCAmelCase)
        lowerCAmelCase__: Dict = self.dummy_model()
        lowerCAmelCase__: Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase__: List[str] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Optional[int] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Tuple = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Optional[int] = output.prev_sample
        lowerCAmelCase__: Dict = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Dict = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def __magic_name__(self):
        # Full loop with Karras sigma spacing enabled.
        lowerCAmelCase__: Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__: Dict = self.get_scheduler_config()
        lowerCAmelCase__: Optional[int] = scheduler_class(**__UpperCAmelCase, use_karras_sigmas=__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps, device=__UpperCAmelCase)
        lowerCAmelCase__: List[Any] = self.dummy_model()
        lowerCAmelCase__: int = self.dummy_sample_deter.to(__UpperCAmelCase) * scheduler.init_noise_sigma
        lowerCAmelCase__: Union[str, Any] = sample.to(__UpperCAmelCase)
        for t in scheduler.timesteps:
            lowerCAmelCase__: Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Union[str, Any] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Tuple = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Dict = output.prev_sample
        lowerCAmelCase__: int = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
678
0
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar a : str = TypeVar('''KEY''') a : Tuple = TypeVar('''VAL''') @dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase ) class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ): __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 class SCREAMING_SNAKE_CASE__ ( _Item ): def __init__( self : Tuple ): """simple docstring""" super().__init__(a_ , a_ ) def __bool__( self : List[Any] ): """simple docstring""" return False a : Any = _DeletedItem() class SCREAMING_SNAKE_CASE__ ( MutableMapping[KEY, VAL] ): def __init__( self : Any , a_ : int = 8 , a_ : float = 0.75 ): """simple docstring""" __snake_case = initial_block_size __snake_case = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __snake_case = capacity_factor __snake_case = 0 def A ( self : Union[str, Any] , a_ : KEY ): """simple docstring""" return hash(a_ ) % len(self._buckets ) def A ( self : Optional[Any] , a_ : int ): """simple docstring""" return (ind + 1) % len(self._buckets ) def A ( self : List[str] , a_ : int , a_ : KEY , a_ : VAL ): """simple docstring""" __snake_case = self._buckets[ind] if not stored: __snake_case = _Item(a_ , a_ ) self._len += 1 return True elif stored.key == key: __snake_case = _Item(a_ , a_ ) return True else: return False def A ( self : Tuple ): """simple docstring""" __snake_case = len(self._buckets ) * self._capacity_factor return len(self ) >= int(a_ ) def A ( self : Optional[int] ): """simple docstring""" if len(self._buckets ) <= self._initial_block_size: return False __snake_case = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def A ( self : str , a_ : int ): """simple docstring""" __snake_case = self._buckets __snake_case = [None] * new_size __snake_case = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def A ( self : List[Any] ): """simple docstring""" 
self._resize(len(self._buckets ) * 2 ) def A ( self : int ): """simple docstring""" self._resize(len(self._buckets ) // 2 ) def A ( self : List[Any] , a_ : KEY ): """simple docstring""" __snake_case = self._get_bucket_index(a_ ) for _ in range(len(self._buckets ) ): yield ind __snake_case = self._get_next_ind(a_ ) def A ( self : str , a_ : KEY , a_ : VAL ): """simple docstring""" for ind in self._iterate_buckets(a_ ): if self._try_set(a_ , a_ , a_ ): break def __setitem__( self : Union[str, Any] , a_ : KEY , a_ : VAL ): """simple docstring""" if self._is_full(): self._size_up() self._add_item(a_ , a_ ) def __delitem__( self : List[str] , a_ : KEY ): """simple docstring""" for ind in self._iterate_buckets(a_ ): __snake_case = self._buckets[ind] if item is None: raise KeyError(a_ ) if item is _deleted: continue if item.key == key: __snake_case = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[int] , a_ : KEY ): """simple docstring""" for ind in self._iterate_buckets(a_ ): __snake_case = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(a_ ) def __len__( self : Dict ): """simple docstring""" return self._len def __iter__( self : List[str] ): """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self : int ): """simple docstring""" __snake_case = " ,".join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
69
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Unit tests for the text-generation stopping criteria.

    BUG FIX: the original used annotated tuple-unpacking targets
    (``a, b: T = ...``), which is a SyntaxError, every test shared the name
    ``__magic_name__`` (so only one survived), and the helper the tests call
    as ``self._get_tensors`` carried a different name.
    """

    def _get_tensors(self, length):
        # Batch of random token ids plus uniform per-token scores of the
        # requested sequence length.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        # A list of criteria fires as soon as any member fires.
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        # Backdated start timestamp makes the time budget already exceeded.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        # Mismatched max_length should warn, not raise.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
678
0
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Pretrained config archive map; kept under the file's existing module-level
# name so any downstream references keep working.
lowerCamelCase = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class A(PretrainedConfig):
    """Configuration for XLM-ProphetNet models.

    BUG FIX: the original declared every ``__init__`` parameter with the
    same name ``A_`` (a SyntaxError), inherited from an undefined base
    name, and left the class attributes and the ``num_hidden_layers``
    property under names the rest of the machinery cannot use. Parameter
    names/defaults below are restored from the assignments in the body.
    """

    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        """Total transformer layers: encoder plus decoder."""
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value) -> None:
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.'
        )
70
"""Project Euler problem 8: largest product of adjacent digits in a series."""
from functools import reduce

lowerCAmelCase_ = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def __lowerCAmelCase(UpperCamelCase = lowerCAmelCase_, span: int = 13) -> int:
    """Return the largest product of ``span`` adjacent digits in the string.

    BUG FIX: the default argument previously referenced an undefined name
    ``N``; it now defaults to the module's 1000-digit constant. ``span``
    (default 13, matching the original hard-coded window) generalizes the
    function to other window sizes.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), UpperCamelCase[i : i + span]))
        for i in range(len(UpperCamelCase) - span + 1)
    )


if __name__ == "__main__":
    # BUG FIX: previously printed an undefined name ``solution``.
    print(f"""{__lowerCAmelCase() = }""")
678
0
"""Next-greatest-element: two O(n^2) references and an O(n) stack version."""
from __future__ import annotations

# Sample input and its expected answer (used by the self-checks below).
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, the first greater element to its right (-1 if none).

    BUG FIX: the original appended the whole input list instead of the
    found element (all locals had been renamed away from the names the
    body read), and all three functions shared one name.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same contract as the slow version, written with enumerate/slices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack implementation of the same contract.

    BUG FIX: the original assigned ``stack[-1]`` to a throwaway local
    instead of ``result[index]``, so the result stayed all -1.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    # Walk right-to-left keeping a stack of candidates greater than
    # everything processed so far.
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        """from __main__ import arr, next_greatest_element_slow, """
        """next_greatest_element_fast, next_greatest_element"""
    )
    print(
        """next_greatest_element_slow():""",
        timeit("""next_greatest_element_slow(arr)""", setup=setup),
    )
    print(
        """next_greatest_element_fast():""",
        timeit("""next_greatest_element_fast(arr)""", setup=setup),
    )
    print(
        """next_greatest_element():""",
        timeit("""next_greatest_element(arr)""", setup=setup),
    )
71
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite PyTorch-style ``layers.0`` segments as Flax-style ``layers_0``.

    BUG FIX: all three functions in this file shared one name while the
    converter below called them as ``rename_key`` /
    ``rename_key_and_reshape_tensor``; names are restored.
    """
    regex = R'''\w+[.]\d+'''
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '''_'''.join(pat.split('''.''')))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Map one PyTorch (key tuple, tensor) pair onto its Flax equivalent.

    Handles norm scale/bias, embeddings, conv kernels (NCHW -> HWIO
    transpose), linear kernels (transpose), and legacy gamma/beta names.
    Returns the (possibly renamed) key tuple and (possibly reshaped) tensor.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    # Norm bias that Flax stores as "scale" (no matching "bias" entry).
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def __lowerCAmelCase(UpperCamelCase, UpperCamelCase_flax_model, UpperCamelCase_seed=42):
    """Convert a PyTorch state dict into a Flax parameter tree.

    BUG FIX: the original used annotated tuple-unpacking targets (a
    SyntaxError) and referenced locals that were never bound after the
    mechanical rename. The three positional parameters (state dict, Flax
    model, PRNG seed with default 42) are preserved.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in UpperCamelCase.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = UpperCamelCase_flax_model.init_weights(PRNGKey(UpperCamelCase_seed))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('''.'''))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0
'''simple docstring'''


def UpperCamelCase(input_str: str) -> bool:
    """Return True iff every character in *input_str* occurs exactly once.

    Uses an arbitrarily large int as a bitmap indexed by code point, so it
    works for the full Unicode range (an empty string is trivially unique).

    BUG FIX: the original body read names (``input_str``, ``bitmap``,
    ``ch_unicode``, ``ch_bit_index_on``) that its mechanically renamed
    assignments never defined.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import scaffolding for the Nezha model package.
# NOTE(review): the config map and the model-name list below are both bound
# to the same name ``lowerCAmelCase_`` (the second assignment overwrites the
# first), and the _LazyModule call references ``_import_structure``, which
# is never defined in this file — looks like mechanical renaming damage;
# verify against the upstream transformers ``models/nezha/__init__.py``.
lowerCAmelCase_ = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch available: the modeling classes are exposed as well.
    lowerCAmelCase_ = [
        """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NezhaForNextSentencePrediction""",
        """NezhaForMaskedLM""",
        """NezhaForPreTraining""",
        """NezhaForMultipleChoice""",
        """NezhaForQuestionAnswering""",
        """NezhaForSequenceClassification""",
        """NezhaForTokenClassification""",
        """NezhaModel""",
        """NezhaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): pervasive mechanical renaming damage in this file — classes
# share the name ``_snake_case``, methods share ``SCREAMING_SNAKE_CASE__``,
# several ``def`` headers repeat the parameter name ``a`` (a SyntaxError in
# Python), and locals are assigned to ``SCREAMING_SNAKE_CASE`` while bodies
# read the original names. Code is preserved byte-for-byte below; verify
# against the upstream TFCvt test file before repairing.
class _snake_case(A__):
    # ConfigTester hook: the Cvt config must expose embed_dim and num_heads.
    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(a, 'embed_dim'))
        self.parent.assertTrue(hasattr(a, 'num_heads'))


class _snake_case:
    # Model tester: builds small Cvt configs and random inputs for the tests.
    def __init__(
        self,
        a,
        a=13,                    # presumably batch_size — TODO confirm upstream
        a=64,                    # presumably image_size
        a=3,                     # presumably num_channels
        a=[16, 48, 96],          # presumably embed_dim
        a=[1, 3, 6],             # presumably num_heads
        a=[1, 2, 10],            # presumably depth
        a=[7, 3, 3],             # presumably patch_sizes
        a=[4, 2, 2],             # presumably patch_stride
        a=[2, 1, 1],             # presumably patch_padding
        a=[2, 2, 2],             # presumably stride_kv
        a=[False, False, True],  # presumably cls_token
        a=[0.0, 0.0, 0.0],       # presumably attention_drop_rate
        a=0.02,                  # presumably initializer_range
        a=1E-12,                 # presumably layer_norm_eps
        a=True,                  # presumably is_training
        a=True,                  # presumably use_labels
        a=2,                     # presumably num_labels
    ) -> str:
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = image_size
        SCREAMING_SNAKE_CASE = patch_sizes
        SCREAMING_SNAKE_CASE = patch_stride
        SCREAMING_SNAKE_CASE = patch_padding
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = num_labels
        SCREAMING_SNAKE_CASE = num_channels
        SCREAMING_SNAKE_CASE = embed_dim
        SCREAMING_SNAKE_CASE = num_heads
        SCREAMING_SNAKE_CASE = stride_kv
        SCREAMING_SNAKE_CASE = depth
        SCREAMING_SNAKE_CASE = cls_token
        SCREAMING_SNAKE_CASE = attention_drop_rate
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = layer_norm_eps

    def SCREAMING_SNAKE_CASE__(self) -> Any:
        # Random pixel_values (+ labels when use_labels) and a config.
        SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.num_labels)
        SCREAMING_SNAKE_CASE = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def SCREAMING_SNAKE_CASE__(self, a, a, a) -> Dict:
        # Forward pass through the base model; checks the feature-map shape
        # after each patch-embedding stage halves/quarters the resolution.
        SCREAMING_SNAKE_CASE = TFCvtModel(config=a)
        SCREAMING_SNAKE_CASE = model(a, training=a)
        SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            SCREAMING_SNAKE_CASE = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            SCREAMING_SNAKE_CASE = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def SCREAMING_SNAKE_CASE__(self, a, a, a) -> List[str]:
        SCREAMING_SNAKE_CASE = self.num_labels
        SCREAMING_SNAKE_CASE = TFCvtForImageClassification(a)
        SCREAMING_SNAKE_CASE = model(a, labels=a, training=a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def SCREAMING_SNAKE_CASE__(self) -> int:
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs
        SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class _snake_case(A__, A__, unittest.TestCase):
    # Model-test mixin configuration flags / class tuples.
    _lowercase: Optional[Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    _lowercase: str = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    _lowercase: Dict = False
    _lowercase: Optional[int] = False
    _lowercase: Optional[Any] = False
    _lowercase: Dict = False
    _lowercase: List[Any] = False

    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = TFCvtModelTester(self)
        SCREAMING_SNAKE_CASE = TFCvtConfigTester(self, config_class=a, has_text_modality=a, hidden_size=37)

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        # Runs the common config round-trip checks.
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions')
    def SCREAMING_SNAKE_CASE__(self) -> Any:
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def SCREAMING_SNAKE_CASE__(self) -> str:
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    @slow
    def SCREAMING_SNAKE_CASE__(self) -> Any:
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        # Re-runs keras fit under mixed_float16, then restores float32.
        SCREAMING_SNAKE_CASE = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(a)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32')

    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        # The model's call signature must start with pixel_values.
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(a)
            SCREAMING_SNAKE_CASE = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE = ['pixel_values']
            self.assertListEqual(arg_names[:1], a)

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        # Verifies hidden_states count and the first stage's feature shape,
        # both via explicit kwarg and via config.
        def check_hidden_states_output(a, a, a):
            SCREAMING_SNAKE_CASE = model_class(a)
            SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a, a))
            SCREAMING_SNAKE_CASE = outputs.hidden_states
            SCREAMING_SNAKE_CASE = len(self.model_tester.depth)
            self.assertEqual(len(a), a)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = True
            check_hidden_states_output(a, a, a)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE = True
            check_hidden_states_output(a, a, a)

    def SCREAMING_SNAKE_CASE__(self) -> Optional[int]:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a)

    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a)

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = TFCvtModel.from_pretrained(a)
            self.assertIsNotNone(a)


def lowerCamelCase__():
    # Fixture image used by the integration test below.
    SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_tf
@require_vision
class _snake_case(unittest.TestCase):
    @cached_property
    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        # End-to-end inference check against pinned logits.
        SCREAMING_SNAKE_CASE = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        SCREAMING_SNAKE_CASE = self.default_image_processor
        SCREAMING_SNAKE_CASE = prepare_img()
        SCREAMING_SNAKE_CASE = image_processor(images=a, return_tensors='tf')
        # forward pass
        SCREAMING_SNAKE_CASE = model(**a)
        # verify the logits
        SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, a)
        SCREAMING_SNAKE_CASE = tf.constant([0.92_85, 0.90_15, -0.31_50])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), a, atol=1E-4))
73
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = 
FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ 
: Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
678
0
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : """simple docstring""" def __init__( self : Dict , _A : str , _A : Optional[int]=13 , _A : List[Any]=7 , _A : Tuple=True , _A : Any=True , _A : List[str]=True , _A : Any=True , _A : Optional[Any]=99 , _A : int=16 , _A : str=36 , _A : Dict=6 , _A : Optional[int]=6 , _A : int=6 , _A : Dict=37 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Optional[Any]=16 , _A : Any=2 , _A : Optional[Any]=0.02 , _A : Tuple=3 , _A : Optional[int]=4 , _A : Any=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = parent __SCREAMING_SNAKE_CASE : Any = batch_size __SCREAMING_SNAKE_CASE : Dict = seq_length __SCREAMING_SNAKE_CASE : List[Any] = is_training __SCREAMING_SNAKE_CASE : Any = use_input_mask __SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids __SCREAMING_SNAKE_CASE : Dict = use_labels __SCREAMING_SNAKE_CASE : Optional[int] = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = embedding_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Dict = num_hidden_layers __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_groups __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size 
__SCREAMING_SNAKE_CASE : str = hidden_act __SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Tuple = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Tuple = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : str = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE : Dict = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : Dict , _A : str , _A : Union[str, Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = AlbertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = model(_A , attention_mask=_A , token_type_ids=_A ) __SCREAMING_SNAKE_CASE : str = model(_A , token_type_ids=_A ) __SCREAMING_SNAKE_CASE : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str , _A : Optional[int] , _A : Dict , _A : List[str] , _A : List[str] , _A : Any , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = AlbertForPreTraining(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : List[str] , _A : Union[str, Any] , _A : Optional[Any] , _A : Any , _A : List[str] , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = AlbertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def UpperCAmelCase__ ( self : Tuple , _A : Any , _A : Tuple , _A : Union[str, Any] , _A : Tuple , _A : int , _A : Optional[int] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = AlbertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] , _A : List[str] , _A : str , _A : int , _A : Any , _A : Optional[Any] , _A : Optional[int] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.num_labels __SCREAMING_SNAKE_CASE : Optional[int] = AlbertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Optional[int] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.num_labels __SCREAMING_SNAKE_CASE : str = AlbertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : List[str] , _A : Tuple , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = 
input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ) : List[str] = config_and_inputs __SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True def UpperCAmelCase__ ( self : int , _A : Optional[int] , _A : List[str] , _A : int=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class in 
get_values(_A ): __SCREAMING_SNAKE_CASE : Dict = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A ) __SCREAMING_SNAKE_CASE : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = AlbertModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", 
"relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE : str = type self.model_tester.create_and_check_model(*_A ) @slow def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Tuple = AlbertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = AlbertModel.from_pretrained('''albert-base-v2''' ) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
74
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
'''simple docstring''' # Imports import numpy as np class lowerCamelCase_ : def __init__( self : Dict , _A : int=None , _A : Any=None , _A : List[str]=None , _A : Optional[int]=None , _A : Optional[int]=None ): '''simple docstring''' self.set_matricies(red=_A , green=_A , blue=_A , red_edge=_A , nir=_A ) def lowercase_ ( self : Optional[Any] , _A : int=None , _A : Union[str, Any]=None , _A : Any=None , _A : Dict=None , _A : Optional[Any]=None ): '''simple docstring''' if red is not None: UpperCAmelCase__ : Dict = red if green is not None: UpperCAmelCase__ : Tuple = green if blue is not None: UpperCAmelCase__ : Any = blue if red_edge is not None: UpperCAmelCase__ : Dict = red_edge if nir is not None: UpperCAmelCase__ : Any = nir return True def lowercase_ ( self : int , _A : Dict="" , _A : Union[str, Any]=None , _A : str=None , _A : Any=None , _A : str=None , _A : int=None ): '''simple docstring''' self.set_matricies(red=_A , green=_A , blue=_A , red_edge=_A , nir=_A ) UpperCAmelCase__ : Union[str, Any] = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: 
return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def lowercase_ ( self : List[Any] ): '''simple docstring''' return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return self.nir * (self.red / (self.green**2)) def lowercase_ ( self : str ): '''simple docstring''' return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def lowercase_ ( self : Tuple ): '''simple docstring''' return (self.nir - self.red) / (self.nir + self.red) def lowercase_ ( self : Dict ): '''simple docstring''' return (self.nir - self.blue) / (self.nir + self.blue) def lowercase_ ( self : Tuple ): '''simple docstring''' return (self.redEdge - self.red) / (self.redEdge + self.red) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def lowercase_ ( self : Any ): '''simple docstring''' return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def lowercase_ ( self : str ): '''simple docstring''' return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def lowercase_ ( self : str ): '''simple docstring''' return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def lowercase_ ( self : Tuple , _A : int=0.0_8 , _A : int=1.2_2 , _A : List[Any]=0.0_3 ): '''simple docstring''' return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def lowercase_ ( self : List[Any] ): '''simple docstring''' return (0.1 * self.nir 
- self.blue) / (0.1 * self.nir + self.blue) def lowercase_ ( self : List[str] ): '''simple docstring''' return (self.nir / self.green) - 1 def lowercase_ ( self : Any ): '''simple docstring''' return (self.nir / self.redEdge) - 1 def lowercase_ ( self : Dict ): '''simple docstring''' return (self.red - self.blue) / self.red def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return self.nir - self.green def lowercase_ ( self : Optional[int] ): '''simple docstring''' return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def lowercase_ ( self : Optional[int] , _A : List[Any]=0.1_6 ): '''simple docstring''' return (self.nir - self.green) / (self.nir + self.green + y) def lowercase_ ( self : List[str] , _A : str=0.5 ): '''simple docstring''' return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def lowercase_ ( self : Optional[Any] , _A : str=None , _A : Any=None ): '''simple docstring''' return (self.nir - b) / (a * self.red) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return (self.red + self.green + self.blue) / 3_0.5 def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return self.nir / self.red def lowercase_ ( self : Optional[Any] ): '''simple docstring''' 
return (self.rvi() - 1) / (self.rvi() + 1) def lowercase_ ( self : Tuple ): '''simple docstring''' return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def lowercase_ ( self : Tuple ): '''simple docstring''' return self.green / (self.nir + self.red + self.green) def lowercase_ ( self : int ): '''simple docstring''' return self.nir / (self.nir + self.red + self.green) def lowercase_ ( self : List[Any] ): '''simple docstring''' return self.red / (self.nir + self.red + self.green) def lowercase_ ( self : Optional[int] ): '''simple docstring''' return (self.green - self.red) / (self.green + self.red) def lowercase_ ( self : Dict ): '''simple docstring''' return (self.red - self.green) / (self.red + self.green) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) UpperCAmelCase__ : str = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def lowercase_ ( self : Dict ): '''simple docstring''' return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def lowercase_ ( self : Dict ): '''simple docstring''' return self.nir / self.red def lowercase_ ( self : List[str] ): '''simple docstring''' return (self.ndvi() + 0.5) ** (1 / 2) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return (self.nir - self.redEdge) / (self.nir + self.redEdge)
75
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: the parquet fixture yields a 4-row, 3-column Dataset
    whose feature dtypes match *expected_features*."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading with keep_in_memory=True must allocate Arrow memory; False must not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit ``features`` mapping must override the dtypes inferred from parquet."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name is propagated; None defaults to "train"."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts both a single path and a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for DatasetDict results, one split at a time."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        # No split requested: read the same file under both default split names.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    """Writing a Dataset to parquet and reading the file back preserves the Arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Complex features (Image) survive a parquet round-trip, in both map-style
    and streaming readers."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Image/audio features get a smaller parquet row-group size; plain features get None."""
    assert get_writer_batch_size(feature) == expected
678
0
"""Speech2Text2 model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """Configuration for a Speech2Text2 decoder.

    Stores all decoder hyper-parameters; token-id arguments are forwarded to
    ``PretrainedConfig.__init__``.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model / backbone.

    Stores the architecture hyper-parameters and resolves the backbone
    ``out_features`` / ``out_indices`` against the generated stage names.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # "stem" plus one name per stage, e.g. ["stem", "stage1", ..., "stage4"].
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
"""Tokenizer for the GPT-SW3 models, backed by SentencePiece."""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for GPT-SW3.

    Special-token defaults differ between the gpt-sw3-7b checkpoint and the
    other sizes; ``__init__`` picks the right defaults from ``name_or_path``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace variants, NFC-normalize."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; no tokenization cleanup is performed."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string.

        Special tokens are kept verbatim and surrounded by decoded SentencePiece
        output on either side.
        """
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc
                # by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Vocab file was loaded from memory only: serialize the model proto instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text directly with SentencePiece, bypassing the slow tokenizer machinery."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with SentencePiece, bypassing the slow tokenizer machinery."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Format a Conversation as alternating "User:"/"Bot:" turns and encode it."""
        all_responses = [
            f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()
        ]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
77
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation coefficient metric, wrapping ``scipy.stats.pearsonr``."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
"""Morse code encoder/decoder (ITU-R M.1677-1 alphabet plus a few extras)."""

# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--",
    " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate *message* to Morse code; letters are space-separated, words by '/'.

    Raises KeyError for characters without a Morse representation.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate space-separated Morse code back to (uppercase) plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a sample message through encrypt/decrypt, printing each step."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
78
from manim import *


# NOTE(review): this scene appears to be a machine-renamed copy of a Manim
# animation showing model weights being loaded from a checkpoint and offloaded
# to disk.  The renaming pass replaced every local variable with
# ``lowerCAmelCase__`` (repeatedly rebound) and most call arguments with
# ``__UpperCAmelCase`` — a name that is never defined in this method — so the
# scene cannot run as written.  The surviving attribute calls (``cpu.move_to``,
# ``gpu.move_to``, ``model.move_to``, ``checkpoint.move_to``, ``disk.move_to``,
# ``cpu_target``, ``model_cpu_arr``, ``ckpt_arr`` …) are remnants of the
# original variable names; restoring them would require the original source.
# Code below is byte-identical to the original; only comments were added.
class _lowerCAmelCase(_lowercase):
    def __magic_name__(self):
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill square.
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5, width=0.5)
        lowerCAmelCase__ : Dict = Rectangle(height=0.25, width=0.25)
        lowerCAmelCase__ : Dict = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        # CPU block: two columns of 6 memory cells plus a label.
        # NOTE(review): ``mem`` below is undefined (renamed away) — TODO confirm original.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6)]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6)]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : int = Text('''CPU''', font_size=24)
        lowerCAmelCase__ : int = Group(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(__UpperCAmelCase)
        # GPU block: one row of 4 cells plus a label.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4)]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : Tuple = Text('''GPU''', font_size=24)
        lowerCAmelCase__ : int = Group(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase)
        gpu.move_to([-1, -1, 0])
        self.add(__UpperCAmelCase)
        # Model block: one row of 6 cells plus a label.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6)]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : Tuple = Text('''Model''', font_size=24)
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase)
        model.move_to([3, -1.0, 0])
        self.add(__UpperCAmelCase)
        # Small fill rectangles marking where each model shard sits on the CPU.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase):
            rect.set_stroke(__UpperCAmelCase)
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(__UpperCAmelCase, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=__UpperCAmelCase)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=__UpperCAmelCase, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=__UpperCAmelCase, buff=0.0)
            self.add(__UpperCAmelCase)
            model_cpu_arr.append(__UpperCAmelCase)
        self.add(*__UpperCAmelCase, *__UpperCAmelCase, *__UpperCAmelCase)
        # Checkpoint block: one row of 6 cells plus a label.
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6)]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''', font_size=24)
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase)
        checkpoint.move_to([3, 0.5, 0])
        self.add(__UpperCAmelCase)
        # Fill rectangles mapping checkpoint shards onto the CPU columns.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase, opacity=0.7)
            target.move_to(__UpperCAmelCase)
            ckpt_arr.append(__UpperCAmelCase)
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(__UpperCAmelCase)
        self.add(*__UpperCAmelCase, *__UpperCAmelCase)
        # Legend: a key square with colored bullet labels.
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(__UpperCAmelCase, __UpperCAmelCase)
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""",
            font_size=18,
        )
        blue_text.next_to(__UpperCAmelCase, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(__UpperCAmelCase)
        # Caption for the disk-offload step.
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""",
            font_size=24,
        )
        step_a.move_to([2, 2, 0])
        # Disk block: two columns of 6 "meta" cells plus a label.
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6)]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6)]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0)
        lowerCAmelCase__ : List[str] = Text('''Disk''', font_size=24)
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase, __UpperCAmelCase).arrange(__UpperCAmelCase, buff=0.5, aligned_edge=__UpperCAmelCase)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(__UpperCAmelCase, run_time=3), Write(__UpperCAmelCase, run_time=1), Create(__UpperCAmelCase, run_time=1))
        # Animate checkpoint shards shrinking and moving onto the disk.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(__UpperCAmelCase, run_time=1.5))
        self.play(*__UpperCAmelCase)
        self.play(FadeOut(__UpperCAmelCase))
        # Final caption: the in-memory checkpoint is garbage collected.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=24)
        step_a.move_to([2, 2, 0])
        self.play(Write(__UpperCAmelCase, run_time=3))
        self.play(
            FadeOut(__UpperCAmelCase, __UpperCAmelCase, *__UpperCAmelCase, *__UpperCAmelCase),
        )
        self.wait()
678
0
import requests

# Put your own OpenWeatherMap appid (API key) here — with an empty key every
# request fails with HTTP 401.
APPID = ""
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return the current weather for location ``q`` as the decoded JSON dict.

    NOTE: the parameter names double as the API query-string keys — the call
    passes ``locals()`` (``{"q": ..., "appid": ...}``) as request params, so
    do not rename them.
    """
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the 5-day/3-hour forecast for location ``q`` as the decoded JSON dict."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return the One Call weather data for coordinates (``lat``, ``lon``) as the decoded JSON dict."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
79
import collections
import os
import re
from pathlib import Path


# Root of the source tree whose __init__.py files are checked.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name(s) of an `if not is_xxx_available()` line.

    Returns None when the line is not a backend guard; multiple backends on the
    same line are sorted and joined with "_and_".
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend has two groups (the name and an empty literal group), so
    # findall yields tuples; the first element is the backend name.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse one __init__.py and return its two halves.

    Returns a pair of dicts mapping backend name (or "none") to the list of
    objects declared there: first the `_import_structure` side, then the
    TYPE_CHECKING side. Returns None for a traditional init (no
    `_import_structure`).
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                # obj[1:-1] strips the surrounding quotes
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    Flags duplicated objects on either side and any object present in one half
    but not the other, per backend key.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        # Compare as sets so duplicates don't mask a genuine difference.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk PATH_TO_TRANSFORMERS, parse every __init__.py and raise ValueError
    listing every init whose two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules (dotted paths), skipping
    private folders and empty leftovers from branches."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; packages were added above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise ValueError if any submodule is missing from the main init's
    `_import_structure` keys."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
from ... import PretrainedConfig


# Map from checkpoint name to its hosted config file. Kept module-level under
# its own name — the obfuscated original assigned it to the same name as the
# class below, shadowing it and leaving the class-body reference undefined.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """
    Configuration class for the Nezha model. Stores the hyper-parameters used
    to instantiate the architecture; defaults reproduce the
    `sijunhe/nezha-cn-base` checkpoint.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig.

        `max_relative_position` is Nezha-specific (functional relative
        positional encoding range); the rest follow the standard BERT-style
        hyper-parameter names.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
80
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    """Builds tiny MegatronBert configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last result is checked.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add pretraining labels when the mixin asks for labeled inputs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Wrap a nested list of token ids as a long tensor on the test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        # Spot-check the first 3x3 corner of the hidden states against the
        # reference values within TOLERANCE.
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
678
0
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Return the Levenshtein (edit) distance between *word1* and *word2*
    using a memoized top-down recursion over the two string indices.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        # 0 when the current letters match, 1 when a substitution is needed
        diff = int(word1[index1] != word2[index2])
        return min(
            1 + min_distance(index1 + 1, index2),  # delete from word1
            1 + min_distance(index1, index2 + 1),  # delete from word2
            diff + min_distance(index1 + 1, index2 + 1),  # match / substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] 
= activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ 
: Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
678
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", 
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = '''bert''' def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[int]=30522 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[Any]=3072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : int=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : str , ) -> int: '''simple docstring''' super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase 
) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = position_embedding_type UpperCAmelCase_ = use_cache UpperCAmelCase_ = classifier_dropout class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
82
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( _lowercase ): A__ = 'sew-d' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Optional[int] = feat_extract_norm lowerCAmelCase__ : str = feat_extract_activation lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : int = list(__UpperCAmelCase ) 
lowerCAmelCase__ : Any = list(__UpperCAmelCase ) lowerCAmelCase__ : int = conv_bias lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups lowerCAmelCase__ : int = len(self.conv_dim ) lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = squeeze_factor lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Any = position_buckets lowerCAmelCase__ : Optional[int] = share_att_key lowerCAmelCase__ : Tuple = relative_attention lowerCAmelCase__ : Optional[int] = norm_rel_ebd lowerCAmelCase__ : Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_dropout lowerCAmelCase__ : Union[str, Any] = attention_dropout lowerCAmelCase__ : str = activation_dropout lowerCAmelCase__ : List[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = feature_layer_norm_eps lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : Tuple = apply_spec_augment lowerCAmelCase__ : List[str] = mask_time_prob lowerCAmelCase__ : int = mask_time_length lowerCAmelCase__ : int = mask_time_min_masks 
lowerCAmelCase__ : Optional[int] = mask_feature_prob lowerCAmelCase__ : int = mask_feature_length lowerCAmelCase__ : int = mask_feature_min_masks # ctc loss lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction lowerCAmelCase__ : Any = ctc_zero_infinity # sequence classification lowerCAmelCase__ : Tuple = use_weighted_layer_sum lowerCAmelCase__ : Dict = classifier_proj_size @property def __magic_name__( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
678
0
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _lowercase): snake_case__ : Dict = ["audio_values", "audio_mask"] def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=2_0_4_8 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=[1_6, 1_6] , __lowerCAmelCase : Optional[Any]=1_2_8 , __lowerCAmelCase : Optional[int]=4_4_1_0_0 , __lowerCAmelCase : Optional[Any]=8_6 , __lowerCAmelCase : Dict=2_0_4_8 , __lowerCAmelCase : Tuple=0.0 , **__lowerCAmelCase : Dict , ): """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : str = spectrogram_length _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = patch_size _lowerCamelCase : Optional[Any] = feature_size // self.patch_size[1] _lowerCamelCase : Any = n_fft _lowerCamelCase : int = sampling_rate // hop_length_to_sampling_rate _lowerCamelCase : Optional[int] = sampling_rate _lowerCamelCase : Optional[Any] = padding_value _lowerCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.array ): """simple docstring""" _lowerCamelCase : Tuple = spectrogram( __lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) _lowerCamelCase : Union[str, Any] = log_spec[:, 
:-1] _lowerCamelCase : Optional[Any] = log_spec - 20.0 _lowerCamelCase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[str] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) _lowerCamelCase : Union[str, Any] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _lowerCamelCase : List[str] = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCamelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): _lowerCamelCase : Any = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log 
mel spectrograms, truncate by time axis _lowerCamelCase : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): _lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _lowerCamelCase : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _lowerCamelCase : Any = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _lowerCamelCase : str = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding _lowerCamelCase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _lowerCamelCase : Optional[int] = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _lowerCamelCase : int = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase : List[str] = audio_features[i] _lowerCamelCase : Optional[Any] = feature # return as BatchFeature if return_attention_mask: _lowerCamelCase : Union[str, Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: _lowerCamelCase : Any = {'''audio_values''': padded_audio_features} _lowerCamelCase : Optional[int] = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
83
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# SentencePiece fixture shared by both test classes below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the standard Pegasus tokenizer (google/pegasus-large conventions)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """Round-trip a single special token through id conversion."""
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        """Slow and fast tokenizers must agree on special/unknown token handling."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # Expected encodings written as prefix + zero padding for readability;
        # values are identical to the fully spelled-out literals.
        expected_encoding = {
            "input_ids": [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787,
                 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304,
                 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143,
                 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787,
                 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039,
                 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            "attention_mask": [
                [1] * 83,
                [1] * 29 + [0] * 54,
                [1] * 11 + [0] * 72,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the BigBird-Pegasus tokenizer variant (offset=0, [MASK] token)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Pin the ids produced for a sentence checked against the original TF implementation."""
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
678
0
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """
    Configuration class for EfficientNet models.

    Stores the hyper-parameters that define an EfficientNet architecture
    (stage widths/depths, kernel sizes, scaling coefficients, regularization
    rates) and passes any remaining keyword arguments to `PretrainedConfig`.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Store every hyper-parameter on the instance so it round-trips
        # through to_dict()/from_dict() serialization.
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to 4 hidden layers in the modeling code.
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet (image input, NCHW layout)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX graph.
        return 1e-5
84
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """
    Configuration for the Donut Swin Transformer encoder.

    Stores the architecture hyper-parameters (patching, stage depths, attention
    heads, windowing, dropout rates) and forwards remaining keyword arguments
    to `PretrainedConfig`.
    """

    model_type = "donut-swin"

    # Map standard config attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
678
0
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """
    Class-conditional image generation with a Diffusion Transformer (DiT).

    Components:
        transformer: denoising Transformer operating in VAE latent space.
        vae: autoencoder used to decode latents into images.
        scheduler: diffusion scheduler driving the denoising loop.
        id2label: optional mapping of class id -> comma-separated label names,
            used to build the label -> id lookup for `get_label_ids`.
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                # each value may hold several comma-separated synonyms
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label(s) to the class ids expected by __call__.

        Raises:
            ValueError: if a label is not present in the pipeline's label map.
        """
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images for the given ImageNet class ids.

        Classifier-free guidance is applied when guidance_scale > 1 by running
        each sample through the model twice: once with its class label and once
        with the "null" class (id 1000).
        """
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        # Duplicate latents for the conditional / unconditional halves.
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Both halves must share identical latents; re-sync them.
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma: drop the variance channels before the scheduler step
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
85
lowerCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' ) lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' ) lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : Tuple = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : List[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized] lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized] lowerCAmelCase__ : int = 1 if from_exponent > to_exponent: lowerCAmelCase__ : List[str] = from_exponent - to_exponent else: lowerCAmelCase__ : Dict = -(to_exponent - from_exponent) return value * pow(10 , UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
678
0
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Helper that supplies the (empty) init kwargs for the feature extractor."""

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        # MarkupLMFeatureExtractor takes no configuration arguments.
        return {}


def get_html_strings():
    """Return two small HTML documents used as test fixtures."""
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
86
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so the module still imports when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    """Tests for the zero-shot image classification pipeline (CLIP-based)."""

    # Row of three ~equal scores with unspecified label order, reused below.
    # List equality with `* 5` matches the fully spelled-out expectations.

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        any_order_row = [
            {"score": 0.333, "label": ANY(str)},
            {"score": 0.333, "label": ANY(str)},
            {"score": 0.333, "label": ANY(str)},
        ]
        self.assertEqual(nested_simplify(output), [any_order_row] * 5)

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        any_order_row = [
            {"score": 0.333, "label": ANY(str)},
            {"score": 0.333, "label": ANY(str)},
            {"score": 0.333, "label": ANY(str)},
        ]
        self.assertEqual(nested_simplify(output), [any_order_row] * 5)

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        expected = [
            {"score": 0.511, "label": "remote"},
            {"score": 0.485, "label": "cat"},
            {"score": 0.004, "label": "plane"},
        ]
        self.assertEqual(nested_simplify(output), expected)

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(nested_simplify(output), [expected] * 5)

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        expected = [
            {"score": 0.511, "label": "remote"},
            {"score": 0.485, "label": "cat"},
            {"score": 0.004, "label": "plane"},
        ]
        self.assertEqual(nested_simplify(output), expected)

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(nested_simplify(output), [expected] * 5)
678
0
"""Convert Table Transformer checkpoints (PubTables-1M) to the HuggingFace format.

The obfuscated original defined every helper under one reused name and lost all
assignment targets; this restores the identifiers the call sites require.
"""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return a copy of ``state_dict`` with backbone keys renamed to the HF convention."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split each fused 768-row in_proj matrix/bias into separate q/k/v projections (in place).

    NOTE(review): hidden size is assumed to be 256 (hence the :256 / 256:512 / -256: slices) —
    this matches the r18 PubTables-1M checkpoints this script targets.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize a PIL ``image`` so its longest side matches the target for the checkpoint type."""
    width, height = image.size
    current_max_size = max(width, height)
    # detection checkpoints expect 800 px on the long side, structure recognition 1000 px
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a float tensor and normalize with ImageNet statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download the original checkpoint, remap it to the HF model, verify outputs, optionally save/push."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
"""Minimum s-t cut of a flow network via Ford-Fulkerson (Edmonds-Karp)."""

from collections import deque

# Capacity matrix of the example network (row u, column v = capacity of edge u->v).
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search on the residual graph.

    Returns True when sink ``t`` is reachable from source ``s`` via edges with
    remaining capacity; ``parent`` is filled so the augmenting path can be walked
    back from ``t``.
    """
    visited = [False] * len(graph)
    # deque gives O(1) popleft; traversal order is identical to list.pop(0)
    queue = deque([s])
    visited[s] = True
    while queue:
        u = queue.popleft()
        for ind, capacity in enumerate(graph[u]):
            if not visited[ind] and capacity > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of a minimum source-sink cut of ``graph``.

    Runs Ford-Fulkerson until no augmenting path remains, then reports every
    edge that is saturated in the residual graph but had positive capacity
    originally. NOTE: ``graph`` is mutated in place (it becomes the residual
    graph).
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [row[:] for row in graph]  # keep the original capacities for the final comparison
    while bfs(graph, source, sink, parent):
        # find the bottleneck capacity along the augmenting path
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities along the path (forward minus, backward plus)
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # saturated edges that existed originally form the minimum cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
# Tests for the text-generation pipeline (PyTorch and TensorFlow backends).
#
# NOTE(review): this file has been machine-obfuscated. Local assignments are
# renamed to `lowerCAmelCase__` while later statements still reference the
# original names (`text_generator`, `outputs`, `pipe`, `model`, `tokenizer`,
# `logger`, ...), and boolean/expected-value arguments were replaced by the
# undefined name `__UpperCAmelCase`. Several methods also share the name
# `__magic_name__` (later defs shadow earlier ones) and some have duplicate
# parameter names. These mismatches must be reconciled against the original
# upstream test file before this module can run.
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase(unittest.TestCase):
    """Pipeline tests for causal-LM text generation."""

    # NOTE(review): both class attributes are named `A__`; the second
    # assignment overwrites the first (originally distinct model mappings).
    A__ = MODEL_FOR_CAUSAL_LM_MAPPING
    A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def __magic_name__(self):
        """Deterministic small-model generation on the PyTorch backend."""
        lowerCAmelCase__: Tuple = pipeline(task='''text-generation''', model='''sshleifer/tiny-ctrl''', framework='''pt''')
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__: Optional[int] = text_generator('''This is a test''', do_sample=__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ],
        )
        # Batched input: one result list per prompt.
        lowerCAmelCase__: List[str] = text_generator(['''This is a test''', '''This is a second test'''])
        self.assertEqual(
            __UpperCAmelCase,
            [
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ],
        )
        # return_tensors path: token ids instead of decoded text.
        lowerCAmelCase__: str = text_generator('''This is a test''', do_sample=__UpperCAmelCase, num_return_sequences=2, return_tensors=__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [
                {'''generated_token_ids''': ANY(__UpperCAmelCase)},
                {'''generated_token_ids''': ANY(__UpperCAmelCase)},
            ],
        )
        # Give the tokenizer a pad token so batch_size=2 generation works.
        lowerCAmelCase__: List[Any] = text_generator.model.config.eos_token_id
        lowerCAmelCase__: List[Any] = '''<pad>'''
        lowerCAmelCase__: List[Any] = text_generator(
            ['''This is a test''', '''This is a second test'''],
            do_sample=__UpperCAmelCase,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=__UpperCAmelCase,
        )
        self.assertEqual(
            __UpperCAmelCase,
            [
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase)},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase)},
                ],
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase)},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase)},
                ],
            ],
        )

    @require_tf
    def __magic_name__(self):
        """Deterministic small-model generation on the TensorFlow backend."""
        lowerCAmelCase__: int = pipeline(task='''text-generation''', model='''sshleifer/tiny-ctrl''', framework='''tf''')
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__: List[Any] = text_generator('''This is a test''', do_sample=__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ],
        )
        lowerCAmelCase__: List[str] = text_generator(['''This is a test''', '''This is a second test'''], do_sample=__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ],
        )

    # NOTE(review): duplicate parameter names below are a SyntaxError —
    # originally distinct (model, tokenizer, processor) arguments.
    def __magic_name__(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase):
        """Build a pipeline + example inputs for the common pipeline test-runner."""
        lowerCAmelCase__: Dict = TextGenerationPipeline(model=__UpperCAmelCase, tokenizer=__UpperCAmelCase)
        return text_generator, ["This is a test", "Another test"]

    def __magic_name__(self):
        """`stop_sequence` truncates generation at the first occurrence."""
        lowerCAmelCase__: Any = '''Hello I believe in'''
        lowerCAmelCase__: List[Any] = pipeline('''text-generation''', model='''hf-internal-testing/tiny-random-gpt2''')
        lowerCAmelCase__: Optional[int] = text_generator(__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}],
        )
        lowerCAmelCase__: List[str] = text_generator(__UpperCAmelCase, stop_sequence=''' fe''')
        self.assertEqual(__UpperCAmelCase, [{'''generated_text''': '''Hello I believe in fe'''}])

    def __magic_name__(self, __UpperCAmelCase, __UpperCAmelCase):
        """Shared behavioral checks run against every supported model/tokenizer pair."""
        lowerCAmelCase__: str = text_generator.model
        lowerCAmelCase__: Optional[int] = text_generator.tokenizer
        lowerCAmelCase__: Tuple = text_generator('''This is a test''')
        self.assertEqual(__UpperCAmelCase, [{'''generated_text''': ANY(__UpperCAmelCase)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        # return_full_text=False strips the prompt from the output.
        lowerCAmelCase__: Optional[int] = text_generator('''This is a test''', return_full_text=__UpperCAmelCase)
        self.assertEqual(__UpperCAmelCase, [{'''generated_text''': ANY(__UpperCAmelCase)}])
        self.assertNotIn('''This is a test''', outputs[0]['''generated_text'''])
        # Same option supplied at pipeline-construction time.
        lowerCAmelCase__: Dict = pipeline(task='''text-generation''', model=__UpperCAmelCase, tokenizer=__UpperCAmelCase, return_full_text=__UpperCAmelCase)
        lowerCAmelCase__: Dict = text_generator('''This is a test''')
        self.assertEqual(__UpperCAmelCase, [{'''generated_text''': ANY(__UpperCAmelCase)}])
        self.assertNotIn('''This is a test''', outputs[0]['''generated_text'''])
        lowerCAmelCase__: List[str] = text_generator('''This is a test''', return_full_text=__UpperCAmelCase)
        self.assertEqual(__UpperCAmelCase, [{'''generated_text''': ANY(__UpperCAmelCase)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        lowerCAmelCase__: Optional[int] = text_generator(['''This is great !''', '''Something else'''], num_return_sequences=2, do_sample=__UpperCAmelCase)
        self.assertEqual(
            __UpperCAmelCase,
            [
                [{'''generated_text''': ANY(__UpperCAmelCase)}, {'''generated_text''': ANY(__UpperCAmelCase)}],
                [{'''generated_text''': ANY(__UpperCAmelCase)}, {'''generated_text''': ANY(__UpperCAmelCase)}],
            ],
        )
        # Batched generation needs a pad token; skip otherwise.
        if text_generator.tokenizer.pad_token is not None:
            lowerCAmelCase__: List[str] = text_generator(
                ['''This is great !''', '''Something else'''], num_return_sequences=2, batch_size=2, do_sample=__UpperCAmelCase
            )
            self.assertEqual(
                __UpperCAmelCase,
                [
                    [{'''generated_text''': ANY(__UpperCAmelCase)}, {'''generated_text''': ANY(__UpperCAmelCase)}],
                    [{'''generated_text''': ANY(__UpperCAmelCase)}, {'''generated_text''': ANY(__UpperCAmelCase)}],
                ],
            )
        # Mutually exclusive output-format flags must raise.
        with self.assertRaises(__UpperCAmelCase):
            lowerCAmelCase__: Any = text_generator('''test''', return_full_text=__UpperCAmelCase, return_text=__UpperCAmelCase)
        with self.assertRaises(__UpperCAmelCase):
            lowerCAmelCase__: Optional[int] = text_generator('''test''', return_full_text=__UpperCAmelCase, return_tensors=__UpperCAmelCase)
        with self.assertRaises(__UpperCAmelCase):
            lowerCAmelCase__: str = text_generator('''test''', return_text=__UpperCAmelCase, return_tensors=__UpperCAmelCase)
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            lowerCAmelCase__: str = text_generator('''''')
            self.assertEqual(__UpperCAmelCase, [{'''generated_text''': ANY(__UpperCAmelCase)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                lowerCAmelCase__: List[str] = text_generator('''''')
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        lowerCAmelCase__: Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 1_0000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator('''This is a test''' * 500, max_new_tokens=20)
            lowerCAmelCase__: Optional[Any] = text_generator('''This is a test''' * 500, handle_long_generation='''hole''', max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(__UpperCAmelCase):
                text_generator(
                    '''This is a test''' * 500,
                    handle_long_generation='''hole''',
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__(self):
        """`device_map`/`torch_dtype` forwarding, via model_kwargs and direct args."""
        import torch

        # Classic `model_kwargs`
        lowerCAmelCase__: List[str] = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''',
            model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa)
        lowerCAmelCase__: Any = pipe('''This is a test''')
        self.assertEqual(
            __UpperCAmelCase,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCAmelCase__: Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''', torch_dtype=torch.bfloataa)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloataa)
        lowerCAmelCase__: Union[str, Any] = pipe('''This is a test''')
        self.assertEqual(
            __UpperCAmelCase,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        lowerCAmelCase__: str = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''')
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.floataa)
        lowerCAmelCase__: Any = pipe('''This is a test''')
        self.assertEqual(
            __UpperCAmelCase,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def __magic_name__(self):
        """Smoke test: explicit device index with fp16 weights."""
        import torch

        lowerCAmelCase__: List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device=0, torch_dtype=torch.floataa)
        pipe('''This is a test''')

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__(self):
        """Smoke test: accelerate device_map with sampling kwargs forwarded."""
        import torch

        lowerCAmelCase__: Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''', device_map='''auto''', torch_dtype=torch.floataa)
        pipe('''This is a test''', do_sample=__UpperCAmelCase, top_p=0.5)

    def __magic_name__(self):
        """Warn only when both `max_length` and `max_new_tokens` are user-set."""
        lowerCAmelCase__: int = '''Hello world'''
        lowerCAmelCase__: Union[str, Any] = pipeline('''text-generation''', model='''hf-internal-testing/tiny-random-gpt2''')
        if text_generator.model.framework == "tf":
            lowerCAmelCase__: List[Any] = logging.get_logger('''transformers.generation.tf_utils''')
        else:
            lowerCAmelCase__: Dict = logging.get_logger('''transformers.generation.utils''')
        lowerCAmelCase__: Optional[Any] = '''Both `max_new_tokens`'''  # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(__UpperCAmelCase) as cl:
            lowerCAmelCase__: List[str] = text_generator(__UpperCAmelCase, max_length=10, max_new_tokens=1)
        self.assertIn(__UpperCAmelCase, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(__UpperCAmelCase) as cl:
            lowerCAmelCase__: Any = text_generator(__UpperCAmelCase, max_new_tokens=1)
        self.assertNotIn(__UpperCAmelCase, cl.out)
        with CaptureLogger(__UpperCAmelCase) as cl:
            lowerCAmelCase__: Union[str, Any] = text_generator(__UpperCAmelCase, max_length=10)
        self.assertNotIn(__UpperCAmelCase, cl.out)
678
0
# Conversion script: original SpeechT5 HiFi-GAN vocoder checkpoint ->
# Hugging Face `SpeechTaHifiGan` format.
#
# NOTE(review): this file has been machine-obfuscated. Local assignments are
# renamed (`_lowercase`, `SCREAMING_SNAKE_CASE`) while later statements still
# reference the original names (`checkpoint`, `config`, `hf_model`,
# `orig_checkpoint`, `stats`, `model`, `repo_id`, `parser`, `args`,
# `load_weights`, `convert_hifigan_checkpoint`), and parameter lists contain
# duplicate names. Module-level annotations also reference un-imported typing
# names. These must be reconciled against the upstream script before running.
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()

SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger("transformers.models.speecht5")


def UpperCamelCase_(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_) -> Optional[Any]:
    """Copy weight-norm parameters from the original checkpoint dict into the HF model."""
    # Weight norm must be applied so that the (weight_g, weight_v) slots exist,
    # then removed again so the saved model holds the fused weights.
    hf_model.apply_weight_norm()
    _lowercase : Tuple = checkpoint['input_conv.weight_g']
    _lowercase : Any = checkpoint['input_conv.weight_v']
    _lowercase : Tuple = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        _lowercase : Any = checkpoint[F'''upsamples.{i}.1.weight_g''']
        _lowercase : Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
        _lowercase : Optional[Any] = checkpoint[F'''upsamples.{i}.1.bias''']
    # One residual block per (upsample layer, kernel size) pair.
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            _lowercase : Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            _lowercase : str = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            _lowercase : Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            _lowercase : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            _lowercase : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            _lowercase : Tuple = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    _lowercase : Union[str, Any] = checkpoint['output_conv.1.weight_g']
    _lowercase : Dict = checkpoint['output_conv.1.weight_v']
    _lowercase : Tuple = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()


@torch.no_grad()
def UpperCamelCase_(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=None, ) -> Tuple:
    """Convert an original checkpoint + stats.npy into a saved HF model (optionally pushed to the hub)."""
    if config_path is not None:
        _lowercase : Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_)
    else:
        _lowercase : str = SpeechTaHifiGanConfig()
    _lowercase : Any = SpeechTaHifiGan(lowerCamelCase_)
    _lowercase : Optional[int] = torch.load(lowerCamelCase_)
    load_weights(orig_checkpoint['model']['generator'], lowerCamelCase_, lowerCamelCase_)
    # stats.npy holds the (mean, scale) normalization vectors for the mel input.
    _lowercase : str = np.load(lowerCamelCase_)
    _lowercase : List[str] = stats[0].reshape(-1)
    _lowercase : List[str] = stats[1].reshape(-1)
    _lowercase : List[Any] = torch.from_numpy(lowerCamelCase_).float()
    _lowercase : int = torch.from_numpy(lowerCamelCase_).float()
    model.save_pretrained(lowerCamelCase_)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(lowerCamelCase_)


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
89
def __lowerCAmelCase ( UpperCamelCase ) -> str: return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] ) def __lowerCAmelCase ( UpperCamelCase ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(UpperCamelCase ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
678
0
'''simple docstring'''
# SentencePiece-based tokenizer for the RemBERT model.
#
# NOTE(review): this file has been machine-obfuscated. The class inherits from
# itself (`class a__(a__)` — a NameError at class creation; the base was
# presumably `PreTrainedTokenizer`, which is imported but otherwise unused),
# many defs share the name `__SCREAMING_SNAKE_CASE`, `__init__` declares
# duplicate `lowerCamelCase_` parameters (a SyntaxError), the module constants
# are all bound to `__UpperCAmelCase` while the class body references
# `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP` /
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, and several method bodies
# reference original local names (`vocab`, `state`, `pieces`, `out_string`,
# `sep`, `cls`, `token_ids_a`, `out_vocab_file`, `logger`) that the renamed
# assignments no longer bind. Reconcile against the upstream tokenizer file.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.model'''}

__UpperCAmelCase = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}

__UpperCAmelCase = {
    '''google/rembert''': 256,
}


class a__ ( a__ ):
    '''simple docstring'''

    lowercase__ : str = VOCAB_FILES_NAMES
    lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="[CLS]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , **lowerCamelCase_ , ) -> Tuple:
        # Forward the special-token / casing options to the base tokenizer,
        # then load the SentencePiece model from the vocab file.
        super().__init__(
            do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
        lowerCAmelCase__ = do_lower_case
        lowerCAmelCase__ = remove_space
        lowerCAmelCase__ = keep_accents
        lowerCAmelCase__ = vocab_file
        lowerCAmelCase__ = spm.SentencePieceProcessor()
        self.sp_model.Load(lowerCamelCase_ )

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        # Vocabulary size == size of the SentencePiece model.
        return len(self.sp_model )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # Full token -> id map, including tokens added after training.
        lowerCAmelCase__ = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict:
        # The SentencePiece processor is not picklable; drop it and reload on
        # unpickling from self.vocab_file.
        lowerCAmelCase__ = self.__dict__.copy()
        lowerCAmelCase__ = None
        return state

    def __setstate__( self , lowerCamelCase_ ) -> str:
        lowerCAmelCase__ = d
        lowerCAmelCase__ = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False ) -> int:
        # Tokenize raw text into SentencePiece pieces.
        lowerCAmelCase__ = self.sp_model.EncodeAsPieces(lowerCamelCase_ )
        return pieces

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Any:
        # Piece (token string) -> integer id.
        return self.sp_model.PieceToId(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
        # Integer id -> piece (token string).
        return self.sp_model.IdToPiece(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
        # Detokenize a list of pieces back into a string.
        lowerCAmelCase__ = self.sp_model.decode_pieces(lowerCamelCase_ )
        return out_string

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
        # Build model inputs with special tokens:
        # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
        lowerCAmelCase__ = [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
        # Mask marking special-token positions with 1 and sequence tokens with 0.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
        return [1] + ([0] * len(lowerCamelCase_ )) + [1]

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
        # Token-type ids: 0 for the first segment (incl. its specials), 1 for the second.
        lowerCAmelCase__ = [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
        # Copy the SentencePiece model file into the target directory and
        # return the saved path(s).
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
            return
        lowerCAmelCase__ = os.path.join(
            lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
            copyfile(self.vocab_file , lowerCamelCase_ )
        return (out_vocab_file,)
90
# Tests for the DPMSolverSDEScheduler (diffusers).
#
# NOTE(review): this file has been machine-obfuscated. Local assignments are
# renamed to `lowerCAmelCase__` while later statements still reference the
# original names (`config`, `scheduler_class`, `scheduler`, `model`, `sample`,
# `output`, `result_sum`, `result_mean`), the base class `_lowercase` is
# undefined here (originally `SchedulerCommonTest`), and every test method is
# named `__magic_name__` (later defs shadow earlier ones). Reconcile against
# the upstream test file before running.
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class _lowerCAmelCase ( _lowercase ):
    """Scheduler-suite tests for DPMSolverSDEScheduler."""

    A__ = (DPMSolverSDEScheduler,)
    A__ = 10  # number of inference steps used by the full-loop tests

    def __magic_name__(self, **__UpperCAmelCase):
        """Default scheduler config, with keyword overrides applied on top."""
        lowerCAmelCase__: Dict = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**__UpperCAmelCase)
        return config

    def __magic_name__(self):
        """Config sweep over num_train_timesteps."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase)

    def __magic_name__(self):
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=__UpperCAmelCase, beta_end=__UpperCAmelCase)

    def __magic_name__(self):
        """Config sweep over beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__UpperCAmelCase)

    def __magic_name__(self):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase)

    def __magic_name__(self):
        """Full denoising loop; checks sum/mean of the final sample per device."""
        lowerCAmelCase__: List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__: str = self.get_scheduler_config()
        lowerCAmelCase__: Optional[Any] = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        lowerCAmelCase__: Union[str, Any] = self.dummy_model()
        lowerCAmelCase__: Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__: Optional[Any] = sample.to(__UpperCAmelCase)
        for i, t in enumerate(scheduler.timesteps):
            lowerCAmelCase__: Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[str] = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[str] = output.prev_sample
        lowerCAmelCase__: Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        # Reference values differ slightly per backend (mps / cuda / cpu).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def __magic_name__(self):
        """Full loop with prediction_type='v_prediction'."""
        lowerCAmelCase__: Dict = self.scheduler_classes[0]
        lowerCAmelCase__: Tuple = self.get_scheduler_config(prediction_type='''v_prediction''')
        lowerCAmelCase__: Tuple = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps)
        lowerCAmelCase__: Optional[Any] = self.dummy_model()
        lowerCAmelCase__: Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__: Tuple = sample.to(__UpperCAmelCase)
        for i, t in enumerate(scheduler.timesteps):
            lowerCAmelCase__: List[str] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: List[Any] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: int = output.prev_sample
        lowerCAmelCase__: Any = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def __magic_name__(self):
        """Full loop with timesteps placed on the target device."""
        lowerCAmelCase__: Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__: Tuple = self.get_scheduler_config()
        lowerCAmelCase__: Tuple = scheduler_class(**__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps, device=__UpperCAmelCase)
        lowerCAmelCase__: Dict = self.dummy_model()
        lowerCAmelCase__: Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase__: List[str] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Optional[int] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Tuple = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Optional[int] = output.prev_sample
        lowerCAmelCase__: Dict = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Dict = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def __magic_name__(self):
        """Full loop with Karras sigma spacing enabled."""
        lowerCAmelCase__: Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__: Dict = self.get_scheduler_config()
        lowerCAmelCase__: Optional[int] = scheduler_class(**__UpperCAmelCase, use_karras_sigmas=__UpperCAmelCase)
        scheduler.set_timesteps(self.num_inference_steps, device=__UpperCAmelCase)
        lowerCAmelCase__: List[Any] = self.dummy_model()
        lowerCAmelCase__: int = self.dummy_sample_deter.to(__UpperCAmelCase) * scheduler.init_noise_sigma
        lowerCAmelCase__: Union[str, Any] = sample.to(__UpperCAmelCase)
        for t in scheduler.timesteps:
            lowerCAmelCase__: Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Union[str, Any] = model(__UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Tuple = scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
            lowerCAmelCase__: Dict = output.prev_sample
        lowerCAmelCase__: int = torch.sum(torch.abs(__UpperCAmelCase))
        lowerCAmelCase__: Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
678
0
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[Any] ) -> Tuple: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> Any: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : Union[str, Any] ,A_ : Optional[int] ) -> List[Any]: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Optional[int] ,A_ : Optional[int] ) -> Any: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,A_ : str ) -> Optional[int]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : Optional[int] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. 
Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : Optional[Any] ): return ".".join(str(snake_case__ ) for v in version_tuple )
91
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Tests for the generation stopping criteria (max length / new tokens / time)."""

    def _get_tensors(self, length):
        """Build ``(input_ids, scores)`` of the given sequence length.

        Scores are uniform (1 / length); only the sequence length matters
        to the criteria under test.
        """
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        # A StoppingCriteriaList fires as soon as ANY member criterion fires.
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        # start_length + max_new_tokens == 10 is surfaced as max_length.
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the start timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # A mismatching max_length only warns; it does not raise.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
678
0
'''simple docstring''' import numpy as np def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : str ) -> List[str]: lowercase : str =int(np.ceil((x_end - xa) / h ) ) lowercase : Optional[Any] =np.zeros((n + 1,) ) lowercase : Optional[int] =ya lowercase : List[Any] =xa for k in range(__magic_name__ ): lowercase : str =f(__magic_name__ , y[k] ) lowercase : Optional[int] =f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase : List[str] =f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase : Optional[int] =f(x + h , y[k] + h * ka ) lowercase : List[Any] =y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
92
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
678
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def __A (_SCREAMING_SNAKE_CASE ) ->YolosConfig: """simple docstring""" lowerCAmelCase__ :List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase__ :Tuple = 192 lowerCAmelCase__ :List[str] = 768 lowerCAmelCase__ :Optional[int] = 12 lowerCAmelCase__ :int = 3 lowerCAmelCase__ :List[str] = [800, 1333] lowerCAmelCase__ :Optional[Any] = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase__ :List[Any] = 330 lowerCAmelCase__ :str = 14 lowerCAmelCase__ :str = 6 lowerCAmelCase__ :Dict = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase__ :int = 384 lowerCAmelCase__ :int = 1536 lowerCAmelCase__ :int = 12 lowerCAmelCase__ :List[str] = 6 elif "yolos_b" in yolos_name: lowerCAmelCase__ :Tuple = [800, 1344] lowerCAmelCase__ :List[str] = 91 lowerCAmelCase__ :Dict = 'huggingface/label-files' lowerCAmelCase__ :Union[str, Any] = 'coco-detection-id2label.json' lowerCAmelCase__ :List[str] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowerCAmelCase__ :Optional[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCAmelCase__ :int = idalabel lowerCAmelCase__ :List[Any] = {v: k for k, v in idalabel.items()} return config def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[str]: """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase__ :Union[str, Any] = 
state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ :int = in_proj_weight[: config.hidden_size, :] lowerCAmelCase__ :int = in_proj_bias[: config.hidden_size] lowerCAmelCase__ :Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ :Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ :Dict = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase__ :List[str] = in_proj_bias[-config.hidden_size :] def __A (_SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" if "backbone" in name: lowerCAmelCase__ :str = name.replace('backbone' , 'vit' ) if "cls_token" in name: lowerCAmelCase__ :str = name.replace('cls_token' , 'embeddings.cls_token' ) if "det_token" in name: lowerCAmelCase__ :Tuple = name.replace('det_token' , 'embeddings.detection_tokens' ) if "mid_pos_embed" in name: lowerCAmelCase__ :Dict = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' ) if "pos_embed" in name: lowerCAmelCase__ :List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: lowerCAmelCase__ :Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "blocks" in name: lowerCAmelCase__ :List[Any] = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: lowerCAmelCase__ :str = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCAmelCase__ :List[Any] = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCAmelCase__ :List[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCAmelCase__ :Union[str, Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCAmelCase__ :str = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCAmelCase__ :int = name.replace('mlp.fc2' , 'output.dense' ) if "class_embed" in name: lowerCAmelCase__ :Tuple = 
name.replace('class_embed' , 'class_labels_classifier' ) if "bbox_embed" in name: lowerCAmelCase__ :Tuple = name.replace('bbox_embed' , 'bbox_predictor' ) if "vit.norm" in name: lowerCAmelCase__ :Tuple = name.replace('vit.norm' , 'vit.layernorm' ) return name def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->dict: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCAmelCase__ :Any = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "qkv" in key: lowerCAmelCase__ :str = key.split('.' ) lowerCAmelCase__ :Any = int(key_split[2] ) lowerCAmelCase__ :Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowerCAmelCase__ :Dict = val[:dim, :] lowerCAmelCase__ :Optional[Any] = val[ dim : dim * 2, : ] lowerCAmelCase__ :Union[str, Any] = val[-dim:, :] else: lowerCAmelCase__ :Dict = val[:dim] lowerCAmelCase__ :List[Any] = val[dim : dim * 2] lowerCAmelCase__ :Any = val[-dim:] else: lowerCAmelCase__ :int = val return orig_state_dict def __A () ->torch.Tensor: """simple docstring""" lowerCAmelCase__ :Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase__ :Union[str, Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->Tuple: """simple docstring""" lowerCAmelCase__ :str = get_yolos_config(_SCREAMING_SNAKE_CASE ) # load original state_dict lowerCAmelCase__ :Optional[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # load 🤗 model lowerCAmelCase__ :Optional[Any] = YolosForObjectDetection(_SCREAMING_SNAKE_CASE ) model.eval() lowerCAmelCase__ :str = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase__ :Dict = 800 if yolos_name != 'yolos_ti' else 512 
lowerCAmelCase__ :Dict = YolosImageProcessor(format='coco_detection' , size=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[int] = image_processor(images=prepare_img() , return_tensors='pt' ) lowerCAmelCase__ :List[Any] = model(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ , lowerCAmelCase__ :List[str] = outputs.logits, outputs.pred_boxes lowerCAmelCase__ , lowerCAmelCase__ :int = None, None if yolos_name == "yolos_ti": lowerCAmelCase__ :List[str] = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase__ :Union[str, Any] = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase__ :int = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase__ :Tuple = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase__ :List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase__ :Dict = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase__ :Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase__ :Optional[int] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase__ :str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, 
-3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase__ :Any = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: lowerCAmelCase__ :Union[str, Any] = { 'yolos_ti': 'yolos-tiny', 'yolos_s_200_pre': 'yolos-small', 'yolos_s_300_pre': 'yolos-small-300', 'yolos_s_dWr': 'yolos-small-dwr', 'yolos_base': 'yolos-base', } print('Pushing to the hub...' ) lowerCAmelCase__ :Union[str, Any] = model_mapping[yolos_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='hustvl' ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='hustvl' ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--yolos_name""", default="""yolos_s_200_pre""", type=str, help=( """Name of the YOLOS model you'd like to convert. 
Should be one of 'yolos_ti', 'yolos_s_200_pre',""" """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'.""" ), ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
93
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)


def _rename_key(key):
    """Collapse ``"name.<digits>"`` segments to ``"name_<digits>"`` (Flax naming)."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def _rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key tuple to its Flax counterpart, reshaping
    the tensor where the layouts differ.

    Returns:
        ``(flax_tuple_key, tensor)`` — the (possibly transposed) tensor under
        its Flax parameter name.
    """
    # conv norm or layer norm: a "bias" next to a Flax "scale" is really the scale
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose to Flax's (in, out) layout
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def __lowerCAmelCase(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a Flax params tree for *flax_model*.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to torch tensors.
        flax_model: a stateless Flax model exposing ``init_weights(rng)``.
        init_key: PRNG seed used to instantiate the reference Flax params.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
            randomly initialised Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = _rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = _rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel SCREAMING_SNAKE_CASE = HfApi() SCREAMING_SNAKE_CASE = {} # fmt: off SCREAMING_SNAKE_CASE = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) SCREAMING_SNAKE_CASE = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) SCREAMING_SNAKE_CASE = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) SCREAMING_SNAKE_CASE = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) SCREAMING_SNAKE_CASE = torch.tensor([ 
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) SCREAMING_SNAKE_CASE = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) SCREAMING_SNAKE_CASE = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 
0.8959, 3.3367, 3.2243 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) SCREAMING_SNAKE_CASE = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on SCREAMING_SNAKE_CASE = api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": SCREAMING_SNAKE_CASE = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('CompVis'): SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) SCREAMING_SNAKE_CASE = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) SCREAMING_SNAKE_CASE = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): SCREAMING_SNAKE_CASE = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
94
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = len(A__ ), len(grid[0] ) if ( min(A__ ,A__ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) UpperCAmelCase_ : Tuple = 0 count += depth_first_search(A__ ,row + 1 ,A__ ,A__ ) count += depth_first_search(A__ ,row - 1 ,A__ ,A__ ) count += depth_first_search(A__ ,A__ ,col + 1 ,A__ ) count += depth_first_search(A__ ,A__ ,col - 1 ,A__ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
95
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

lowerCAmelCase_ = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class _lowerCAmelCaseStaging(unittest.TestCase):
    """Round-trip tests against the staging hub: push weights, reload, compare.

    Renamed from `_lowerCAmelCase`: the second test class in this file carried
    the same name and shadowed this one, so these tests never ran.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup; a repo may be missing if its test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def _assert_same_params(self, model, new_model):
        """Compare the two models' flattened parameter trees entry by entry."""
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        self._assert_same_params(model, new_model)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
        self._assert_same_params(model, new_model)

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        self._assert_same_params(model, new_model)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
        self._assert_same_params(model, new_model)


def check_models_equal(modela, modelb):
    """Return True iff every parameter tensor of `modela` is element-wise close
    (sum of absolute differences <= 1e-4) to the matching tensor of `modelb`.

    The original compared a flattened dict against itself (always 0 diff), so
    it reported any two models as equal.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params)
    flat_params_b = flatten_dict(modelb.params)

    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class _lowerCAmelCase(unittest.TestCase):
    """Loading tests for the `subfolder` argument of `from_pretrained`."""

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the repo root must fail: the weights live in `subfolder`.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Tiny shard size forces the sharded checkpoint layout.
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
678
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCamelCase = logging.get_logger(__name__) class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = ["pixel_values"] def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : Union[int, float] = 1 / 2_5_5 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Optional[Any] , ) -> None: super().__init__(**__snake_case ) __magic_name__: Optional[Any] = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6} __magic_name__: Dict = get_size_dict(__snake_case ) __magic_name__: Tuple = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} __magic_name__: Optional[Any] = get_size_dict(__snake_case , param_name="""crop_size""" ) __magic_name__: Any = do_resize __magic_name__: Optional[Any] = size __magic_name__: Union[str, Any] = resample __magic_name__: List[Any] = do_center_crop __magic_name__: Union[str, Any] = crop_size __magic_name__: int = do_rescale __magic_name__: Optional[Any] = rescale_factor __magic_name__: Any = do_normalize __magic_name__: List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__: int = image_std if 
image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : int , ) -> np.ndarray: __magic_name__: List[str] = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' ) return resize( __snake_case , size=(size["""height"""], size["""width"""]) , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ) -> np.ndarray: __magic_name__: Optional[Any] = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Any , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ) -> Any: return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> np.ndarray: return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Tuple , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Dict=None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[Any] , ) -> PIL.Image.Image: __magic_name__: Optional[int] = do_resize if do_resize is not None else self.do_resize __magic_name__: Dict = resample if resample is not None else self.resample __magic_name__: Dict = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__: Any = do_rescale if do_rescale is not None else self.do_rescale __magic_name__: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__: str = do_normalize if do_normalize is not None else self.do_normalize __magic_name__: Any = image_mean if 
image_mean is not None else self.image_mean __magic_name__: Dict = image_std if image_std is not None else self.image_std __magic_name__: Optional[Any] = size if size is not None else self.size __magic_name__: List[str] = get_size_dict(__snake_case ) __magic_name__: int = crop_size if crop_size is not None else self.crop_size __magic_name__: str = get_size_dict(__snake_case , param_name="""crop_size""" ) __magic_name__: Optional[Any] = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
__magic_name__: Optional[Any] = [to_numpy_array(__snake_case ) for image in images] if do_resize: __magic_name__: str = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images] if do_center_crop: __magic_name__: List[str] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images] if do_rescale: __magic_name__: Union[str, Any] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_normalize: __magic_name__: Optional[int] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images] __magic_name__: List[str] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] __magic_name__: Optional[int] = {"""pixel_values""": images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
96
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Distinct names restored: the original bound both the logger and the archive
# map to `__a`, so the logger was immediately lost. `__a` keeps its final
# (map) value for backward compatibility.
logger = logging.get_logger(__name__)

__a = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class lowercase__(PretrainedConfig):
    """UniSpeechSat model configuration.

    Parameter names reconstructed from the body assignments; the mangled
    signature reused one name for every parameter (a SyntaxError).
    """

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def _lowercase(self):
        # Overall downsampling factor of the convolutional feature encoder.
        # NOTE(review): upstream name is `inputs_to_logits_ratio` — confirm
        # before renaming; kept as `_lowercase` to avoid an interface change.
        return functools.reduce(operator.mul, self.conv_stride, 1)
97
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Distinct function names restored: every test had been renamed to the same
# identifier, so only the last definition survived and pytest collected one
# test out of twelve.


def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a single Dataset loaded from parquet."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # When no split is requested the reader defaults to "train". The original
    # `assert x == split if split else "train"` was vacuously true for falsy splits.
    assert dataset.split == split if split else dataset.split == "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from parquet."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
678
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> public names it exports.
# Restored from the mangled form, where each piece was bound to a throwaway
# name and `_import_structure` was never defined; the TYPE_CHECKING imports
# also pointed at nonexistent `maskaformer` modules/classes.
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
98
"""FocalNet model configuration.

Fixes over the previous revision: the ``__init__`` signature declared every
parameter under one repeated mangled name (duplicate argument names are a
SyntaxError), the class listed the same base twice, and the module-level
logger was clobbered by the archive-map assignment. Parameter names are
reconstructed from the attribute assignments in the body.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class _lowerCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model / backbone.

    Stores image/patch geometry, per-stage widths and depths, focal-modulation
    hyper-parameters, and the backbone output-feature selection handled by
    ``BackboneConfigMixin``.
    """

    # Config-type key used by the auto classes (mangled attribute name kept for
    # compatibility with the rest of this file's naming convention).
    A__ = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        # NOTE: mutable list defaults kept to preserve the public signature;
        # they are only read, never mutated, by config consumers.
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, plus the patch-embedding "stem".
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Resolve which stages the backbone exposes (mixin contract).
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
"""Sort the `_import_structure` blocks of lazy `__init__.py` files.

Fixes over the previous revision: every helper was defined as ``def a`` (so
each definition shadowed the previous one) while call sites used the real
names, making the script crash with NameError; the inner ``noop`` and the two
sort lambdas referenced an undefined ``x``; and all five compiled regexes were
bound to one repeated constant name.
"""
import argparse
import os
import re


# Root of the package whose __init__.py files get their import structures sorted.
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ('' if none or line is empty)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks whose first line sits at exactly `indent_level`.

    Everything before the line starting with `start_prompt` (if given) becomes
    one leading block; everything from the line starting with `end_prompt` (if
    given) becomes one trailing block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the reference indent closes the current block...
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # ...unless the previous line was deeper-indented: then this line
                # is the block's closing delimiter and belongs to it.
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that case and underscores are ignored."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` with constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the objects inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file.

    Returns True when `check_only` and the file would change; rewrites the
    file in place otherwise.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` over every `__init__.py` under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
99
"""Pearson correlation coefficient metric backed by `scipy.stats.pearsonr`.

Fixes over the previous revision: the three documentation constants were all
bound to one repeated name (clobbering each other) while the decorator and
`_info` referenced the undefined `_DESCRIPTION` / `_KWARGS_DESCRIPTION` /
`_CITATION`; and both methods were named identically (the second definition
overwrote the first), breaking the `datasets.Metric` `_info`/`_compute`
contract.
"""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the
p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x
increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets
that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1.
        Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0
        implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the
        probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme
        as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher
        values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    """Metric wrapper exposing scipy's Pearson correlation through the `datasets` API."""

    def _info(self):
        # Declares the per-example input schema and points at the scipy docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # Pearson correlation is symmetric in its two arguments, so argument
        # order does not affect the statistic.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-export hub for `transformers` utilities plus shared file-name constants.

Fixes over the previous revision: every file-name constant was assigned to the
same repeated name `_A` (each assignment clobbered the previous one) even
though `IMAGE_PROCESSOR_NAME` was defined in terms of the real
`FEATURE_EXTRACTOR_NAME`; and the minimum-version check compared the requested
version against itself instead of against the installed `__version__`, and
accumulated its message under a name it never defined.
"""

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    copy_func,
    replace_return_docstrings,
)
from .generic import (
    ContextManagers,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    TensorType,
    add_model_info_to_auto_map,
    cached_property,
    can_return_loss,
    expand_dims,
    find_labels,
    flatten_dict,
    infer_framework,
    is_jax_tensor,
    is_numpy_array,
    is_tensor,
    is_tf_symbolic_tensor,
    is_tf_tensor,
    is_torch_device,
    is_torch_dtype,
    is_torch_tensor,
    reshape,
    squeeze,
    strtobool,
    tensor_size,
    to_numpy,
    to_py_obj,
    transpose,
    working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX,
    DISABLE_TELEMETRY,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    EntryNotFoundError,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    cached_file,
    default_cache_path,
    define_sagemaker_information,
    download_url,
    extract_commit_hash,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    has_file,
    http_user_agent,
    is_offline_mode,
    is_remote_url,
    move_cache,
    send_example_telemetry,
    try_to_load_from_cache,
)

# NOTE(review): several `is_*_available` names below look machine-mangled
# (e.g. `is_bsa_available`, `is_detectrona_available`, `is_tfaonnx_available`);
# they are kept as-is because they must match whatever `.import_utils`
# actually defines — verify against that module.
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    TORCH_FX_REQUIRED_VERSION,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    DummyObject,
    OptionalDependencyNotAvailable,
    _LazyModule,
    ccl_version,
    direct_transformers_import,
    get_torch_version,
    is_accelerate_available,
    is_apex_available,
    is_bitsandbytes_available,
    is_bsa_available,
    is_coloredlogs_available,
    is_cython_available,
    is_datasets_available,
    is_decord_available,
    is_detectrona_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_jieba_available,
    is_jumanpp_available,
    is_kenlm_available,
    is_keras_nlp_available,
    is_librosa_available,
    is_natten_available,
    is_ninja_available,
    is_onnx_available,
    is_openai_available,
    is_optimum_available,
    is_pandas_available,
    is_peft_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_pyanvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytest_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sacremoses_available,
    is_safetensors_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_sudachi_available,
    is_tensorflow_probability_available,
    is_tensorflow_text_available,
    is_tfaonnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bfaa_available,
    is_torch_bfaa_cpu_available,
    is_torch_bfaa_gpu_available,
    is_torch_compile_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_neuroncore_available,
    is_torch_tensorrt_fx_available,
    is_torch_tfaa_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_torchdistx_available,
    is_torchdynamo_available,
    is_torchvision_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    requires_backends,
    torch_only_method,
)


# Canonical on-disk file names for the various checkpoint/config formats.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def __snake_case(min_version) -> None:
    """Raise ImportError when the installed transformers is older than `min_version`.

    (Mangled name kept for interface compatibility; this is the conventional
    `check_min_version` helper.)
    """
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
100
from manim import *


class _lowerCAmelCase ( _lowercase ):
    # NOTE(review): all local identifiers in this scene were machine-mangled —
    # `lowerCAmelCase__` is rebound for every produced value and every argument
    # is the undefined `__UpperCAmelCase` — so the data flow between statements
    # cannot be executed as written. The code is kept byte-identical here (only
    # reformatted and commented); restore real variable names before running.
    # The later statements reference the names the originals presumably had
    # (`cpu`, `gpu`, `model`, `checkpoint`, `disk`, `model_cpu_arr`,
    # `ckpt_arr`, `ckpt_cpu_arr`, `cpu_left_col_base`, `cpu_right_col_base`,
    # `disk_left_col_base`, `key_text`, `target`, `animations`, `step_a`).

    def __magic_name__( self ):
        # Scene: illustrate accelerate-style sharded-checkpoint loading —
        # CPU/GPU/Model memory cells, a loaded checkpoint, and its shards being
        # animated onto "Disk" before the in-memory copy is garbage-collected.

        # Basic building blocks: a memory cell, a quarter-size cell, and a fill
        # rectangle used to tint occupied cells.
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # CPU block: two columns of six cells plus a label, placed on the left.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )

        # GPU block: four cells plus a label, below/right of the CPU.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )

        # Model block: six cells plus a label, on the right.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )

        # Small fill targets marking which CPU cells the model's weights occupy.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                # First target anchors to the CPU's first cell, nudged right.
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                # Fourth target wraps back to the first placed target.
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )

        # Loaded-checkpoint block above the model.
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )

        # Fill copies marking the checkpoint's cells, mirrored onto CPU columns.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )

        # Legend (key) in the top-left corner.
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )

        # Caption for the disk-offload step.
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )

        # Disk block: two columns of six quarter-size cells plus a label.
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )

        # Animate each checkpoint shard shrinking onto its disk cell.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )

        # Caption for the garbage-collection step, then fade everything out.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
"""Tests for VisionTextDualEncoderProcessor save/load round-trips and call behavior.

Fixes over the previous revision: all twelve methods were named identically
(each later ``def`` silently overwrote the previous one, so ``setUp``/
``tearDown`` never existed and only one method survived); the ``self.``
receivers had been stripped from the fixture assignments (``self.tmpdirname``
was read but never set); ``np.uinta`` is not a numpy dtype; and several
literal arguments had been replaced by mangled placeholders — reconstructed
from the assertions that consume them.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class __lowercase(unittest.TestCase):
    def setUp(self):
        # Temp dir holding a toy BERT vocab and an image-processor config that
        # the from_pretrained() calls below read back.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # One random channels-first uint8 image, converted to HWC PIL.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        # Round-trip must preserve both sub-components.
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        # Extra kwargs passed at load time must be forwarded to both components.
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        # Processor must delegate image handling to the image processor.
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        # Processor must delegate text handling to the tokenizer.
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        # batch_decode must be a passthrough to the tokenizer.
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
101
"""Consistency checks for Transformers-style lazy `__init__.py` files.

Verifies that (1) the `_import_structure` half and the `TYPE_CHECKING` half of
every init define the same objects per backend, and (2) every submodule is
registered in the main init's `_import_structure`.

The obfuscated original bound every regex to one name and every function to one
name while call sites used the real names; this restores a working module.
"""
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the `_and_`-joined, sorted backend names of an `if not is_xxx_available()` line, or None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Read an init file and return (import_dict_objects, type_hint_objects) per backend.

    Returns None when the file has no `_import_structure` (a traditional init).
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        # Compared as sets: order differences are fine, membership must match.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise ValueError listing every inconsistent `__init__.py`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of top-level submodule names found under the source tree."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (no dots after stripping the path).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise ValueError if any submodule is missing from the main init's `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\["([^"]*)"\]', init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
"""simple docstring""" import qiskit def UpperCamelCase (SCREAMING_SNAKE_CASE = 2 ): UpperCamelCase : List[Any] = qubits # Using Aer's simulator UpperCamelCase : int = qiskit.Aer.get_backend("""aer_simulator""" ) # Creating a Quantum Circuit acting on the q register UpperCamelCase : Dict = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , SCREAMING_SNAKE_CASE ): # Adding CX (CNOT) gate circuit.cx(i - 1 , SCREAMING_SNAKE_CASE ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(SCREAMING_SNAKE_CASE ) ) , list(range(SCREAMING_SNAKE_CASE ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator UpperCamelCase : Optional[int] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1000 ) return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f'''Total count for various states are: {quantum_entanglement(3)}''')
102
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    """Builds tiny MegatronBert configs/inputs and runs per-head shape checks.

    The obfuscated original gave every method the same name and duplicated
    parameter names (a SyntaxError); names are restored to match the call
    sites used by MegatronBertModelTest below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny config, as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # presumably a non-decoder config here — the obfuscated source lost
            # this literal; False matches the encoder-style checks below. TODO confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last result is checked.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each input across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source lost these two attribute names; the
    # True/False values and the preserved comment match the upstream layout —
    # confirm against the full file.
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add pretraining labels when the common tests request labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_megatron_bert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Wrap a nested token list as a long tensor on the active test device.

    BUG FIX: the obfuscated original passed the token list itself as `device=`;
    the device must be the globally selected `torch_device`.
    """
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        # Compare a 3x3 corner of the output element-wise against the reference values.
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
678
0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Optional[int] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Dict=0 ): """simple docstring""" _snake_case = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCamelCase ) ) _snake_case = np.random.RandomState(__lowerCamelCase ) _snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.7_5, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __UpperCAmelCase ( self : str ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __UpperCAmelCase ( self : Tuple ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _snake_case 
= PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) # warmup pass to apply optimizations _snake_case = pipe(**self.get_dummy_inputs() ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def 
__UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = self.get_dummy_inputs() _snake_case = pipe(**__lowerCamelCase ).images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): @property def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self : List[str] ): """simple docstring""" _snake_case = ort.SessionOptions() _snake_case = False return options def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _snake_case = init_image.resize((7_6_8, 5_1_2) ) # using the PNDM scheduler by default _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _snake_case = '''A fantasy landscape, trending on artstation''' _snake_case = np.random.RandomState(0 ) _snake_case = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCamelCase , output_type='''np''' , ) _snake_case = output.images _snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _snake_case = init_image.resize((7_6_8, 5_1_2) ) _snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) _snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) 
_snake_case = '''A fantasy landscape, trending on artstation''' _snake_case = np.random.RandomState(0 ) _snake_case = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCamelCase , output_type='''np''' , ) _snake_case = output.images _snake_case = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
103
"""BART model configuration, plus its ONNX export configuration.

Reconstructed from an obfuscated dump: duplicate parameter names (a SyntaxError),
clobbered class names, the undefined base class `_lowercase`, and the garbled
import `OnnxSeqaSeqConfigWithPast` are restored to the upstream identifiers.
"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    """Configuration class for a BART model.

    Instantiating a configuration with the defaults yields a configuration
    similar to facebook/bart-large. All arguments are stored as attributes
    and forwarded to ``PretrainedConfig``.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    # BART exposes encoder sizes under the generic attribute names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART (default, seq2seq-lm, causal-lm tasks)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic-axis layout of the exported model's inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache, the decoder sees only the newest token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic-axis layout of the exported model's outputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip the seq2seq-specific parent and use the generic with-past layout.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder+decoder dummy inputs (and past_key_values when enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs (and past_key_values when enabled)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a fixed-size dummy batch (no past_key_values)."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation on the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # The parent mutates `flattened_output` in place; no return value needed.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
678
0
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand UpperCamelCase = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) UpperCamelCase = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", 
"""Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) UpperCamelCase = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) UpperCamelCase = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) UpperCamelCase = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]), ("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) 
UpperCamelCase = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) UpperCamelCase = ( ("""JH AH TH KH QH""", 23), ("""JH 9H TH KH QH""", 22), ("""JC KH JS JD JH""", 21), ("""KH KC 3S 3H 3D""", 20), ("""8C 9C 5C 3C TC""", 19), ("""JS QS 9H TS KH""", 18), ("""7C 7S KH 2H 7H""", 17), ("""3C KH 5D 5S KH""", 16), ("""QH 8H KD JH 8S""", 15), ("""2D 6D 9D TH 7D""", 14), ) def _lowerCamelCase ( ) -> int: """simple docstring""" A__ , A__ = randrange(len(UpperCAmelCase_ ) ), randrange(len(UpperCAmelCase_ ) ) A__ = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] A__ , A__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _lowerCamelCase ( UpperCAmelCase_ : int = 100 ) -> int: """simple docstring""" return (generate_random_hand() for _ in range(UpperCAmelCase_ )) @pytest.mark.parametrize("hand, expected", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Optional[Any] ) -> int: """simple docstring""" assert PokerHand(UpperCAmelCase_ )._is_flush() == expected @pytest.mark.parametrize("hand, expected", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : List[Any], UpperCAmelCase_ : Dict ) -> List[str]: """simple docstring""" assert PokerHand(UpperCAmelCase_ )._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : Dict, UpperCAmelCase_ : str ) -> int: """simple docstring""" A__ = PokerHand(UpperCAmelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Union[str, Any] ) -> List[str]: """simple 
docstring""" assert PokerHand(UpperCAmelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("hand, expected", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[Any] ) -> Tuple: """simple docstring""" assert PokerHand(UpperCAmelCase_ )._hand_type == expected @pytest.mark.parametrize("hand, other, expected", UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Union[str, Any] ) -> Any: """simple docstring""" assert PokerHand(UpperCAmelCase_ ).compare_with(PokerHand(UpperCAmelCase_ ) ) == expected @pytest.mark.parametrize("hand, other, expected", generate_random_hands() ) def _lowerCamelCase ( UpperCAmelCase_ : List[Any], UpperCAmelCase_ : Any, UpperCAmelCase_ : Optional[int] ) -> Any: """simple docstring""" assert PokerHand(UpperCAmelCase_ ).compare_with(PokerHand(UpperCAmelCase_ ) ) == expected def _lowerCamelCase ( ) -> Optional[int]: """simple docstring""" A__ = [PokerHand(UpperCAmelCase_ ) for hand in SORTED_HANDS] A__ = poker_hands.copy() shuffle(UpperCAmelCase_ ) A__ = chain(sorted(UpperCAmelCase_ ) ) for index, hand in enumerate(UpperCAmelCase_ ): assert hand == poker_hands[index] def _lowerCamelCase ( ) -> List[str]: """simple docstring""" A__ = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )] pokerhands.sort(reverse=UpperCAmelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ = PokerHand("2C 4S AS 3D 5C" ) A__ = True A__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ = 0 A__ = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) ) A__ = os.path.join(UpperCAmelCase_, "poker_hands.txt" ) with open(UpperCAmelCase_ ) as file_hand: for line in file_hand: A__ = line[:14].strip() A__ = 
line[15:].strip() A__ , A__ = PokerHand(UpperCAmelCase_ ), PokerHand(UpperCAmelCase_ ) A__ = player.compare_with(UpperCAmelCase_ ) if output == "Win": answer += 1 assert answer == 376
104
"""SEW-D model configuration.

Reconstructed from an obfuscated dump: duplicate `__UpperCAmelCase` parameter
names (a SyntaxError) and the undefined base class `_lowercase` are restored
to the upstream identifiers; defaults and assignment order are unchanged.
"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    """Configuration class for a SEW-D model.

    Instantiating with the defaults yields a configuration similar to
    asapp/sew-d-tiny-100k. All arguments are stored as attributes.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Stored as lists so the config serializes cleanly to JSON.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs describe the same stack and must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: audio samples consumed per logit frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
678
0
"""Fast tokenizer for RoFormer.

Reconstructed from an obfuscated dump: duplicate `snake_case__` parameter
names (a SyntaxError), module constants all bound to one clobbered name while
the class referenced the real names, the undefined base `lowerCamelCase_`,
and attribute writes degraded into dead local assignments are restored.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RoFormer tokenizer backed by HuggingFace `tokenizers`.

    Uses a custom Jieba-based pre-tokenizer at runtime, which is swapped for a
    serializable BertPreTokenizer when pickling or saving.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if the serialized tokenizer disagrees with the
        # requested lowercasing / accent-stripping options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; substitute the
        # serializable BertPreTokenizer in the copied state.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the custom Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] (B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap in the serializable pre-tokenizer before writing to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
105
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int 
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] 
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> 
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
from __future__ import annotations

# Every complete placement found; each entry is an independent board snapshot.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) attacks no previously placed queen.

    Queens are placed top-down, so only the current row, the column, and the
    two upward diagonals need to be checked.
    """
    n = len(board)
    for i in range(n):
        # same row or same column
        if board[row][i] == 1 or board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, n)):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking search: place a queen in `row`, recurse, then undo.

    Records and prints every complete placement.  (The original code defined
    all three helpers under one mangled name and appended the *live* board to
    `solution`, so every recorded solution aliased the same later-zeroed list;
    a deep copy is appended instead.)
    """
    if row >= len(board):
        solution.append([r[:] for r in board])  # snapshot, not an alias
        printboard(board)
        print()
        return True
    for column in range(len(board)):
        if is_safe(board, row, column):
            board[row][column] = 1
            solve(board, row + 1)
            board[row][column] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
106
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint -> config URL map.  Kept under the original module-level name
# (the original rebound ``lowerCAmelCase_`` from the logger to this dict,
# leaving the logger unreachable).
lowerCAmelCase_ = {
    """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration for the Donut Swin vision encoder.

    Fixes over the previous revision: the base class ``_lowercase`` was an
    unbound name (the imported ``PretrainedConfig`` is used instead); the two
    ``A__`` class attributes shadowed each other (``model_type`` and
    ``attribute_map`` are what the PretrainedConfig machinery reads); the
    constructor repeated one parameter name (a SyntaxError) and assigned all
    values to locals, never populating ``self``.
    """

    model_type = 'donut-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        """Store the Swin hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = list(depths)  # copy: avoids sharing a mutable default
        self.num_layers = len(self.depths)
        self.num_heads = list(num_heads)
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(self.depths) - 1))
678
0
'''simple docstring'''
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]


def main():
    """Close or stale-comment inactive issues on huggingface/accelerate.

    Fixes over the previous revision: the entry point was named
    ``_SCREAMING_SNAKE_CASE`` while the ``__main__`` guard called the
    undefined ``main``; the sort key lambda referenced an unbound name;
    the exempt-label list was bound to a name the loop never read.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first, so comments[0] is the most recent one.
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )


if __name__ == "__main__":
    main()
107
lowerCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' ) lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' ) lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : Tuple = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : List[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized] lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized] lowerCAmelCase__ : int = 1 if from_exponent > to_exponent: lowerCAmelCase__ : List[str] = from_exponent - to_exponent else: lowerCAmelCase__ : Dict = -(to_exponent - from_exponent) return value * pow(10 , UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
678
0
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True iff the string is a valid dotted-quad IPv4 address.

    Fixes over the previous revision: the parsed-octet list was bound to one
    name and read under another (NameError); the upper octet bound was 254,
    wrongly rejecting valid addresses such as 255.255.255.255; and the script
    below called this function under a name that was never defined.

    >>> is_ip_va_address_valid("192.168.0.23")
    True
    >>> is_ip_va_address_valid("256.1.1.1")
    False
    """
    parts = ip_va_address.split(".")
    # Exactly four purely numeric fields, each within the 0..255 octet range.
    if len(parts) != 4:
        return False
    return all(part.isdigit() and 0 <= int(part) <= 255 for part in parts)


# Alias kept for the original (mangled) function name.
_SCREAMING_SNAKE_CASE = is_ip_va_address_valid

if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
108
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': 
'''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': 
'''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
678
0
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
109
# Capacity matrix of the example network (CLRS max-flow example):
# test_graph[u][v] is the capacity of edge u -> v.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search of the residual graph from `s`.

    Records the BFS tree in `parent` (parent[v] = predecessor of v) and
    returns True iff the sink `t` is reachable through positive-residual
    edges.  (The original appended an unbound name to the queue instead of
    the discovered vertex.)
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Edmonds-Karp max-flow, then report the saturated original edges.

    Returns the list of (u, v) edges whose residual capacity dropped to zero
    — for this algorithm these form a minimum source/sink cut.
    NOTE: `graph` is mutated into its final residual form.
    """
    parent = [-1] * len(graph)
    res = []
    temp = [row[:] for row in graph]  # original capacities, for the cut test
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the BFS path sink -> source.
        path_flow = float('Inf')
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        # Augment: reduce forward residuals, grow backward residuals.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    # Guarded so importing this module does not mutate test_graph or print.
    print(mincut(test_graph, source=0, sink=5))
678
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self:Union[str, Any] , _a:str , _a:str=13 , _a:Union[str, Any]=30 , _a:str=2 , _a:int=3 , _a:List[str]=True , _a:Optional[Any]=True , _a:Optional[Any]=32 , _a:List[Any]=2 , _a:Dict=4 , _a:str=37 , _a:str="gelu" , _a:Tuple=0.1 , _a:Union[str, Any]=0.1 , _a:str=10 , _a:Optional[int]=0.02 , _a:Any=3 , _a:Optional[int]=None , _a:str=2 , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = image_size snake_case__ = patch_size snake_case__ = num_channels snake_case__ = is_training snake_case__ = use_labels snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = scope snake_case__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) snake_case__ = (image_size // patch_size) ** 2 snake_case__ = num_patches + 2 def SCREAMING_SNAKE_CASE__ ( 
self:List[Any] ): snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self:List[str] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Any , _a:List[str] , _a:Union[str, Any] ): snake_case__ = TFDeiTModel(config=__UpperCAmelCase ) snake_case__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:List[str] , _a:List[str] , _a:List[str] ): snake_case__ = TFDeiTForMaskedImageModeling(config=__UpperCAmelCase ) snake_case__ = model(__UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case__ = 1 snake_case__ = TFDeiTForMaskedImageModeling(__UpperCAmelCase ) snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[int] , _a:List[Any] , _a:Any ): snake_case__ = self.type_sequence_label_size snake_case__ = 
TFDeiTForImageClassification(__UpperCAmelCase ) snake_case__ = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ = 1 snake_case__ = TFDeiTForImageClassification(__UpperCAmelCase ) snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self:Any ): snake_case__ = self.prepare_config_and_inputs() snake_case__ = config_and_inputs snake_case__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __magic_name__ (_lowercase ,_lowercase ,unittest.TestCase ): '''simple docstring''' __lowercase : int = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __lowercase : Optional[int] = False __lowercase : Dict = False __lowercase : Any = False __lowercase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self:Dict ): snake_case__ = TFDeiTModelTester(self ) snake_case__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): pass def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(__UpperCAmelCase ) 
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , tf.keras.layers.Dense ) ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(__UpperCAmelCase ) snake_case__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ = [*signature.parameters.keys()] snake_case__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[str] , _a:Union[str, Any] , _a:Dict=False ): snake_case__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def SCREAMING_SNAKE_CASE__ ( self:int ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = TFDeiTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( ) -> List[Any]: snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __magic_name__ 
(unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) snake_case__ = self.default_image_processor snake_case__ = prepare_img() snake_case__ = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ) # forward pass snake_case__ = model(**__UpperCAmelCase ) # verify the logits snake_case__ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) snake_case__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
33
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): 
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) 
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a 
test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): 
lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def _UpperCamelCase (a__ :List[Any] ): """simple docstring""" UpperCamelCase__ = DPTConfig() if "large" in checkpoint_url: UpperCamelCase__ = 1024 UpperCamelCase__ = 4096 UpperCamelCase__ = 24 UpperCamelCase__ = 16 UpperCamelCase__ = [5, 11, 17, 23] UpperCamelCase__ = [256, 512, 1024, 1024] UpperCamelCase__ = (1, 384, 384) if "ade" in checkpoint_url: UpperCamelCase__ = True UpperCamelCase__ = 150 UpperCamelCase__ = '''huggingface/label-files''' UpperCamelCase__ = '''ade20k-id2label.json''' UpperCamelCase__ = json.load(open(cached_download(hf_hub_url(a__ , a__ , repo_type="""dataset""" ) ) , """r""" ) ) UpperCamelCase__ = {int(a__ ): v for k, v in idalabel.items()} UpperCamelCase__ = idalabel UpperCamelCase__ = {v: k for k, v in idalabel.items()} UpperCamelCase__ = [1, 150, 480, 480] return config, expected_shape def _UpperCamelCase (a__ :int ): """simple docstring""" UpperCamelCase__ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def _UpperCamelCase (a__ :List[Any] ): """simple docstring""" if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: UpperCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: UpperCamelCase__ = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: UpperCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: 
UpperCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: UpperCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: UpperCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: UpperCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: UpperCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: UpperCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: UpperCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: UpperCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: UpperCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: UpperCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: UpperCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: UpperCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase__ = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: UpperCamelCase__ = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: UpperCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: UpperCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: UpperCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: UpperCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if 
"pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: UpperCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: 
UpperCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: UpperCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: UpperCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: UpperCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def _UpperCamelCase (a__ :Optional[int] , a__ :Union[str, Any] ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) UpperCamelCase__ = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ = in_proj_weight[: config.hidden_size, :] UpperCamelCase__ = in_proj_bias[: config.hidden_size] UpperCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ = in_proj_bias[-config.hidden_size :] def _UpperCamelCase (): """simple docstring""" UpperCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase__ = Image.open(requests.get(a__ , stream=a__ ).raw ) return im @torch.no_grad() def _UpperCamelCase (a__ :Tuple , a__ :List[Any] , a__ :int , a__ :Optional[int] ): """simple docstring""" UpperCamelCase__ = get_dpt_config(a__ ) # load original state_dict from URL UpperCamelCase__ = torch.hub.load_state_dict_from_url(a__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(a__ ) # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ = state_dict.pop(a__ ) UpperCamelCase__ = val # read in qkv matrices read_in_q_k_v(a__ , a__ ) # load HuggingFace model UpperCamelCase__ = DPTForSemanticSegmentation(a__ ) if '''ade''' in 
checkpoint_url else DPTForDepthEstimation(a__ ) model.load_state_dict(a__ ) model.eval() # Check outputs on an image UpperCamelCase__ = 480 if '''ade''' in checkpoint_url else 384 UpperCamelCase__ = DPTImageProcessor(size=a__ ) UpperCamelCase__ = prepare_img() UpperCamelCase__ = image_processor(a__ , return_tensors="""pt""" ) # forward pass UpperCamelCase__ = model(**a__ ).logits if '''ade''' in checkpoint_url else model(**a__ ).predicted_depth # Assert logits UpperCamelCase__ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: UpperCamelCase__ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(a__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , a__ , atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , a__ ) ) Path(a__ ).mkdir(exist_ok=a__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(a__ , a__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=a__ , ) image_processor.push_to_hub( repo_path_or_name=Path(a__ , a__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=a__ , ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) 
parser.add_argument( "--model_name", default="dpt-large", type=str, help="Name of the model, in case you're pushing to the hub.", ) UpperCamelCase__ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
619
def __lowerCAmelCase ( UpperCamelCase ) -> str: return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] ) def __lowerCAmelCase ( UpperCamelCase ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(UpperCamelCase ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
678
0
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig lowercase_ : List[Any] = logging.get_logger(__name__) lowercase_ : int = 'T5Config' def A__ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ): SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.zeros_like(snake_case_ ) SCREAMING_SNAKE_CASE__: Dict= shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) SCREAMING_SNAKE_CASE__: Optional[int]= shifted_input_ids.at[:, 0].set(snake_case_ ) SCREAMING_SNAKE_CASE__: Tuple= jnp.where(shifted_input_ids == -100 , snake_case_ , snake_case_ ) return shifted_input_ids class _lowerCamelCase ( _lowercase ): __a = "mt5" __a = MTaConfig class _lowerCamelCase ( _lowercase ): __a = "mt5" __a = MTaConfig class _lowerCamelCase ( _lowercase ): __a = "mt5" __a = MTaConfig
64
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _lowerCAmelCase ( _lowercase ): A__ = (DPMSolverSDEScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample 
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def 
__magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : Optional[Any] = 
scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = output.prev_sample lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
678
0
"""simple docstring""" import argparse import json import subprocess def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] __lowerCAmelCase = ( f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"" ''' https://api.github.com/repos/huggingface/transformers/actions/runners''' ) __lowerCAmelCase = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE ) __lowerCAmelCase = output.stdout.decode("utf-8" ) __lowerCAmelCase = json.loads(_UpperCamelCase ) __lowerCAmelCase = status['''runners'''] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_UpperCamelCase ) # save the result so we can report them on Slack with open("offline_runners.txt" , "w" ) as fp: fp.write(json.dumps(_UpperCamelCase ) ) if len(_UpperCamelCase ) > 0: __lowerCAmelCase = '''\n'''.join([x["name"] for x in offline_runners] ) raise ValueError(f"The following runners are offline:\n{failed}" ) if __name__ == "__main__": def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return values.split("," ) A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--target_runners", default=None, type=list_str, required=True, help="Comma-separated list of runners to check status.", ) parser.add_argument( "--token", default=None, type=str, required=True, help="A token that has actions:read permission." ) A : str = parser.parse_args() get_runner_status(args.target_runners, args.token)
636
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = 3 lowerCAmelCase__ : Tuple = 250 lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length return input_ids, scores def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : List[str] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase__ , 
lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(__UpperCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(__UpperCAmelCase ) , 1 )
678
0
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def lowercase__ ( snake_case_ :Union[str, Any] ): def wrapper(*snake_case_ :int , **snake_case_ :List[Any] ): __UpperCAmelCase = timeit.default_timer() __UpperCAmelCase = func(*snake_case_ , **snake_case_ ) __UpperCAmelCase = timeit.default_timer() - starttime return delta __UpperCAmelCase = func.__name__ return wrapper def lowercase__ ( snake_case_ :Any , snake_case_ :Any=100 , snake_case_ :int=None ): __UpperCAmelCase = [] __UpperCAmelCase = seq_shapes or {} for i in range(snake_case_ ): __UpperCAmelCase = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(snake_case_ , _ArrayXD ): __UpperCAmelCase = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(snake_case_ , datasets.Value ): if v.dtype == "string": __UpperCAmelCase = '''The small grey turtle was surprisingly fast when challenged.''' else: __UpperCAmelCase = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(snake_case_ , datasets.Sequence ): while isinstance(snake_case_ , datasets.Sequence ): __UpperCAmelCase = v.feature __UpperCAmelCase = seq_shapes[k] __UpperCAmelCase = np.random.rand(*snake_case_ ).astype(v.dtype ) __UpperCAmelCase = data dummy_data.append((i, example) ) return dummy_data def lowercase__ ( snake_case_ :Any , snake_case_ :Dict , snake_case_ :Optional[Any]=100 , snake_case_ :List[Any]=None ): __UpperCAmelCase = generate_examples(snake_case_ , num_examples=snake_case_ , seq_shapes=snake_case_ ) with ArrowWriter(features=snake_case_ , path=snake_case_ ) as writer: for key, record in dummy_data: __UpperCAmelCase = features.encode_example(snake_case_ ) writer.write(snake_case_ ) __UpperCAmelCase = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' 
) __UpperCAmelCase = datasets.Dataset.from_file(filename=snake_case_ , info=datasets.DatasetInfo(features=snake_case_ ) ) return dataset
49
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
678
0
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration _a : Dict = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] _a : Optional[int] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] _a : int = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) _a : int = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) _a : Any = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ): for tf_name, hf_name in patterns: UpperCAmelCase = k.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return k def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ): UpperCAmelCase = BigBirdPegasusConfig(**SCREAMING_SNAKE_CASE ) UpperCAmelCase = BigBirdPegasusForConditionalGeneration(SCREAMING_SNAKE_CASE ) UpperCAmelCase = torch_model.state_dict() UpperCAmelCase = {} # separating decoder 
weights UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} UpperCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): UpperCAmelCase = [k.endswith(SCREAMING_SNAKE_CASE ) for ending in KEYS_TO_IGNORE] if any(SCREAMING_SNAKE_CASE ): continue UpperCAmelCase = DECODER_PATTERNS UpperCAmelCase = rename_state_dict_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if new_k not in state_dict: raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): UpperCAmelCase = v.T UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE ) assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): UpperCAmelCase = [k.endswith(SCREAMING_SNAKE_CASE ) for ending in KEYS_TO_IGNORE] if any(SCREAMING_SNAKE_CASE ): continue UpperCAmelCase = REMAINING_PATTERNS UpperCAmelCase = rename_state_dict_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): UpperCAmelCase = v.T UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' UpperCAmelCase = mapping['''model.embed_positions.weight'''] UpperCAmelCase = mapping.pop('model.embed_positions.weight' ) UpperCAmelCase = torch_model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE ) UpperCAmelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int ): UpperCAmelCase = tf.train.list_variables(SCREAMING_SNAKE_CASE ) UpperCAmelCase = {} UpperCAmelCase = ['''global_step'''] for name, shape in tqdm(SCREAMING_SNAKE_CASE , desc='converting tf checkpoint to dict' ): UpperCAmelCase = any(pat in name for pat in ignore_name ) if skip_key: continue UpperCAmelCase = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCAmelCase = array return tf_weights def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ): UpperCAmelCase = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE ) UpperCAmelCase = convert_bigbird_pegasus(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) torch_model.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') _a : List[Any] 
= parser.parse_args() _a : Union[str, Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
447
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : int = R'''\w+[.]\d+''' lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase ) for pat in pats: lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) ) return key def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowerCAmelCase__ : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer 
norm bias lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any: # Step 1: Convert pytorch tensor to numpy lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) ) lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase ) lowerCAmelCase__ : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase__ : str = rename_key(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase ) return unflatten_dict(UpperCamelCase )
678
0
'''simple docstring''' import sys SCREAMING_SNAKE_CASE_: Union[str, Any] =( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for digit in s: product *= int(snake_case_ ) return product def lowerCAmelCase_ ( snake_case_ : int = N ) -> int: '''simple docstring''' UpperCAmelCase_ = -sys.maxsize - 1 UpperCAmelCase_ = n[:13] UpperCAmelCase_ = 13 while cur_index < len(snake_case_ ) - 13: if int(n[cur_index] ) >= int(substr[0] ): UpperCAmelCase_ = substr[1:] + n[cur_index] cur_index += 1 else: UpperCAmelCase_ = max(snake_case_ , str_eval(snake_case_ ) ) UpperCAmelCase_ = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f"{solution() = }")
78
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
from __future__ import annotations class __lowerCAmelCase : """simple docstring""" def __init__( self : List[str] , _lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" snake_case_ = data snake_case_ = None snake_case_ = None def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] )->None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def _lowerCAmelCase ( lowerCAmelCase_ :Tuple )->int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] )->bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def _lowerCAmelCase ( )->None: # Main function for testing. '''simple docstring''' snake_case_ = Node(1 ) snake_case_ = Node(2 ) snake_case_ = Node(3 ) snake_case_ = Node(4 ) snake_case_ = Node(5 ) snake_case_ = Node(6 ) snake_case_ = Node(7 ) snake_case_ = Node(8 ) snake_case_ = Node(9 ) print(is_full_binary_tree(lowerCAmelCase_ ) ) print(depth_of_tree(lowerCAmelCase_ ) ) print("Tree is: " ) display(lowerCAmelCase_ ) if __name__ == "__main__": main()
283
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = 
FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ 
: Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
678
0
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def __snake_case ( __A ,__A ,__A ) -> Any: lowercase : Optional[Any] = 0 if start < end: lowercase : Union[str, Any] = randint(__A ,__A ) lowercase : List[str] = a[end] lowercase : List[Any] = a[pivot] lowercase : str = temp lowercase : Tuple = _in_place_partition(__A ,__A ,__A ) count += _in_place_quick_sort(__A ,__A ,p - 1 ) count += _in_place_quick_sort(__A ,p + 1 ,__A ) return count def __snake_case ( __A ,__A ,__A ) -> Optional[Any]: lowercase : List[str] = 0 lowercase : List[Any] = randint(__A ,__A ) lowercase : str = a[end] lowercase : Optional[int] = a[pivot] lowercase : List[str] = temp lowercase : str = start - 1 for index in range(__A ,__A ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowercase : List[str] = new_pivot_index + 1 lowercase : int = a[new_pivot_index] lowercase : int = a[index] lowercase : Tuple = temp lowercase : Optional[Any] = a[new_pivot_index + 1] lowercase : List[str] = a[end] lowercase : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase: Any =TemporaryFile() lowerCAmelCase: int =100 # 1000 elements are to be sorted lowerCAmelCase , lowerCAmelCase: Tuple =0, 1 # mean and standard deviation lowerCAmelCase: Optional[Any] =np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array lowerCAmelCase: Union[str, Any] =np.load(outfile) lowerCAmelCase: List[str] =len(M) - 1 lowerCAmelCase: Union[str, Any] =_in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
607
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCAmelCase ( _lowercase ): __UpperCAmelCase : Tuple = ['''image_processor''', '''tokenizer'''] __UpperCAmelCase : Dict = '''BlipImageProcessor''' __UpperCAmelCase : str = '''AutoTokenizer''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' snake_case : Any = False super().__init__(__UpperCAmelCase , __UpperCAmelCase ) snake_case : Optional[Any] = self.image_processor def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Any: '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." 
) # Get only text if images is None: snake_case : List[Any] = self.tokenizer snake_case : Union[str, Any] = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) return text_encoding # add pixel_values snake_case : Optional[Any] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) if text is not None: snake_case : Union[str, Any] = self.tokenizer( text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) else: snake_case : Dict = None if text_encoding is not None: encoding_image_processor.update(__UpperCAmelCase ) return encoding_image_processor def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any: '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property # Copied from 
transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase ( self ) -> Optional[int]: '''simple docstring''' snake_case : Optional[Any] = self.tokenizer.model_input_names snake_case : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
178
"""Tests for reading and writing `datasets` objects to/from Parquet.

NOTE(review): reconstructed from a whitespace-mangled, identifier-obfuscated
dump. Pytest fixture/parameter names (`parquet_path`, `tmp_path`,
`shared_datadir`, `dataset`) are inferred from pytest conventions and the
call sites — confirm against the test suite's conftest.py.
"""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Assert that `dataset` matches the shared 4-row / 3-column parquet fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading with keep_in_memory=True must grow Arrow memory; False must not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit `features` mapping overrides the dtypes inferred from the file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name is propagated to the resulting Dataset."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # NOTE(review): kept as in the original — when `split` is falsy this
    # asserts the truthy string "train" and is effectively a no-op.
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """A single path and a list of paths must both be accepted."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert that every requested split of `dataset_dict` matches the fixture schema."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Same keep_in_memory contract as above, but reading into a DatasetDict."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """Explicit `features` override for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Reading a dict of paths yields one split per key; no split means train+test."""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    """Round-trip: writing a Dataset then reading the file back yields the same table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features must survive a write/read round-trip, eager and streaming."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Media features (even nested ones) force a smaller parquet row-group size."""
    assert get_writer_batch_size(feature) == expected
678
0
"""Tests for the Trainer callback mechanism (add/remove/pop, event ordering).

NOTE(review): reconstructed from a whitespace-mangled, identifier-obfuscated
dump. The callback method names are grounded by the event strings they append
("on_init_end", ...); `MyTestTrainerCallback` appears literally in the
original call sites. Class/test names and `DefaultFlowCallback` /
`UserWarning` arguments are inferred — confirm against upstream.
"""
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that records the name of every event that fires, in order."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a tiny regression Trainer writing into the per-test output dir."""
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Compare two callback lists, treating a class and an instance of it as equal."""
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Replay the Trainer's config to build the exact expected event sequence."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        # NOTE(review): kept as in the original — the eval dataloader length is
        # used as the number of train steps per epoch (datasets are same-sized).
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
323
"""FocalNet model configuration.

NOTE(review): reconstructed from a whitespace-mangled, identifier-obfuscated
dump. Attribute names are grounded by the right-hand sides of the original
assignments (`= image_size`, `= patch_size`, ...); the class and archive-map
names are inferred from the "focalnet" model_type and checkpoint URL.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model / backbone.

    Stores the architecture hyper-parameters and, via `BackboneConfigMixin`,
    the aligned `out_features` / `out_indices` used when FocalNet serves as a
    backbone.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        # NOTE(review): mutable list defaults kept for interface compatibility
        # with existing callers/serialized configs.
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, plus the stem, for backbone wiring.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
"""Utilities to convert a PyTorch state dict into Flax parameters.

NOTE(review): reconstructed from a whitespace-mangled, identifier-obfuscated
dump; local variable names are inferred from the data flow.
"""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Replace every `name.<digits>` segment in `key` with `name_<digits>`.

    PyTorch indexes submodules as e.g. ``layers.0.weight``; the corresponding
    Flax parameter tree uses ``layers_0``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PT weight key to its Flax equivalent and reshape if necessary.

    Args:
        pt_tuple_key: the dotted PT key split into a tuple of path segments.
        pt_tensor: the corresponding weight (numpy array at this point).
        random_flax_state_dict: flattened random-init Flax params, used to
            decide which renaming applies for ambiguous suffixes.

    Returns:
        ``(flax_tuple_key, tensor)`` — possibly transposed for conv/linear
        kernels.
    """
    # layer norm represented as bias-only on the PT side -> Flax "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax stores the transposed kernel
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert `pt_state_dict` into an (unflattened) Flax parameter dict.

    Shapes are validated against a randomly initialized copy of `flax_model`;
    a mismatch raises ``ValueError``.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
33
"""Pearson correlation metric backed by `scipy.stats.pearsonr`.

NOTE(review): reconstructed from a whitespace-mangled, identifier-obfuscated
dump in which BOTH metric methods were renamed to the same identifier (the
second def silently shadowed the first); restored to the `datasets.Metric`
convention `_info` / `_compute`. Line wrapping inside the docstring constants
is approximate — the original newlines were lost.
"""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the
p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x
increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets
that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        """Describe the metric: inputs are two float sequences of equal length."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Pearson r (and optionally the p-value) of the two inputs.

        Pearson correlation is symmetric in its arguments, so the argument
        order passed to scipy does not affect the result.
        """
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ): UpperCamelCase__ = 1 UpperCamelCase__ = 3 UpperCamelCase__ = (32, 32) UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) UpperCamelCase__ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCamelCase ( self ): UpperCamelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ = self.dummy_cond_unet_upscale UpperCamelCase__ = DDPMScheduler() UpperCamelCase__ = DDIMScheduler(prediction_type="""v_prediction""" ) UpperCamelCase__ = self.dummy_vae UpperCamelCase__ = self.dummy_text_encoder UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase__ = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCamelCase__ = StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , ) UpperCamelCase__ = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCamelCase__ = '''A painting of a squirrel eating a burger''' UpperCamelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) UpperCamelCase__ = sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) UpperCamelCase__ = output.images UpperCamelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) UpperCamelCase__ = sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] UpperCamelCase__ = image[0, -3:, -3:, -1] UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1] UpperCamelCase__ = 
low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCamelCase__ = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ): UpperCamelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ = self.dummy_cond_unet_upscale UpperCamelCase__ = DDPMScheduler() UpperCamelCase__ = DDIMScheduler(prediction_type="""v_prediction""" ) UpperCamelCase__ = self.dummy_vae UpperCamelCase__ = self.dummy_text_encoder UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase__ = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCamelCase__ = StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , ) UpperCamelCase__ = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCamelCase__ = '''A painting of a squirrel eating a burger''' UpperCamelCase__ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) UpperCamelCase__ = output.images assert image.shape[0] == 2 UpperCamelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) UpperCamelCase__ = sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) UpperCamelCase__ = output.images assert image.shape[0] == 2 
    # NOTE(review): this method belongs to a fast-test class whose header is above
    # this chunk. Obfuscation renamed every assignment target to ``UpperCamelCase__``
    # while later uses keep the original names (sd_pipe, unet, ...); tokens are kept
    # byte-identical here and only comments are added.
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def _lowerCamelCase ( self ):
        # Smoke-test the dummy upscale pipeline in fp16 on GPU: it must run
        # end-to-end and return an image 4x the low-res input's side length.
        UpperCamelCase__ = self.dummy_cond_unet_upscale
        UpperCamelCase__ = DDPMScheduler()
        UpperCamelCase__ = DDIMScheduler(prediction_type="""v_prediction""" )
        UpperCamelCase__ = self.dummy_vae
        UpperCamelCase__ = self.dummy_text_encoder
        UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # build a 64x64 RGB PIL image from the dummy tensor (NCHW -> HWC, first batch item)
        UpperCamelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        UpperCamelCase__ = unet.half()
        UpperCamelCase__ = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        UpperCamelCase__ = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        UpperCamelCase__ = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCamelCase__ = '''A painting of a squirrel eating a burger'''
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
        # the x4 upscaler multiplies each spatial dimension by 4
        UpperCamelCase__ = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Integration tests that run the real ``stabilityai/stable-diffusion-x4-upscaler``
    # checkpoint on GPU; marked @slow so they only run in the nightly/slow CI job.

    def _lowerCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self ):
        # fp32 end-to-end check: output must closely match a stored reference image.
        UpperCamelCase__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase__ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat.npy""" )
        UpperCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
        UpperCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = '''a cat sitting on a park bench'''
        # fixed seed so the reference comparison is deterministic
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
        UpperCamelCase__ = output.images[0]
        assert image.shape == (512, 512, 3)
        # tight tolerance for fp32
        assert np.abs(expected_image - image ).max() < 1E-3

    def _lowerCamelCase ( self ):
        # fp16 variant of the test above; looser tolerance because of half precision.
        UpperCamelCase__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase__ = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat_fp16.npy""" )
        UpperCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
        UpperCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = '''a cat sitting on a park bench'''
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
        UpperCamelCase__ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def _lowerCamelCase ( self ):
        # Memory-usage test: with attention slicing (chunk size 1) and sequential
        # CPU offload, the fp16 pipeline must peak below ~2.9 GB of VRAM.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase__ = '''stabilityai/stable-diffusion-x4-upscaler'''
        UpperCamelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase__ = '''a cat sitting on a park bench'''
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
        UpperCamelCase__ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
619
from manim import *


class _lowerCAmelCase ( _lowercase ):
    # Manim scene illustrating how checkpoint weights move between a model,
    # CPU memory, and disk (used in the Accelerate "big model inference" docs).
    # NOTE(review): obfuscation renamed assignment targets (``lowerCAmelCase__``)
    # while later statements use the original names (cpu, gpu, model, ...);
    # tokens are kept byte-identical and only comments are added.

    def __magic_name__( self ):
        # base rectangles reused (via .copy()) as memory-cell glyphs
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # CPU: two columns of six cells, labelled, placed left of centre
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )

        # GPU: a single row of four cells
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )

        # Model: a row of six cells on the right
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )

        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        # for each model cell, create a small filled target laid out over the CPU
        # columns; i == 0 anchors at the corner, the rest chain off the previous one
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )

        # Loaded checkpoint: another row of six cells above the model
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )

        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        # fill each checkpoint cell and mirror a copy onto the CPU columns
        # (first five on the left column, the last on the right)
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )

        # legend in the top-left corner
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )

        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )

        # first caption: weights are spread over disk / devices
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )

        # Disk: two columns of six "meta" cells, drawn together with the caption
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )

        # animate the checkpoint cells shrinking onto the disk column
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )

        self.play(FadeOut(__UpperCAmelCase ) )

        # second caption, then fade everything out and hold the final frame
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
# Script: convert Salesforce LAVIS InstructBLIP checkpoints to the Hugging Face
# InstructBlip format, verify logits/generation parity, and optionally save/push.
# NOTE(review): obfuscation renamed assignment targets (``SCREAMING_SNAKE_CASE__``)
# and argument references (``snake_case_``) while call sites keep original names
# (rename_keys, hf_model, ...); tokens are kept byte-identical, comments added.
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    TaConfig,
    TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def A__ ( ):
    """Download and return the demo image used for the parity check."""
    SCREAMING_SNAKE_CASE__: Optional[Any]= '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    SCREAMING_SNAKE_CASE__: Tuple= Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('''RGB''' )
    return image


def A__ ( snake_case_ : Optional[Any] ):
    """Build the (old_name, new_name) pairs mapping LAVIS state-dict keys to HF keys."""
    SCREAMING_SNAKE_CASE__: str= []
    # fmt: off
    # vision encoder
    rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
    rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
    rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
    rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
    rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
    rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
    # per-layer mappings for every ViT encoder block
    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
        rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
        rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
        rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
        rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
        rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
        rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
        rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
        rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
        rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
        rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )

    # QFormer
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
    rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )

    # fmt: on
    return rename_keys


def A__ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Dict ):
    """Move a value in the dict from the old key to the new key (in place)."""
    SCREAMING_SNAKE_CASE__: List[Any]= dct.pop(snake_case_ )
    SCREAMING_SNAKE_CASE__: Dict= val


def A__ ( snake_case_ : List[str] , snake_case_ : Tuple ):
    """Rebuild the fused qkv bias per vision layer from the separate q/v biases.

    LAVIS stores q_bias and v_bias only (k bias is zero); HF expects one
    concatenated qkv bias, so insert zeros for k in the middle.
    """
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        SCREAMING_SNAKE_CASE__: Optional[Any]= state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
        SCREAMING_SNAKE_CASE__: Dict= state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )

        # next, set bias in the state dict
        SCREAMING_SNAKE_CASE__: Tuple= torch.cat((q_bias, torch.zeros_like(snake_case_ , requires_grad=snake_case_ ), v_bias) )
        SCREAMING_SNAKE_CASE__: Optional[int]= qkv_bias


def A__ ( snake_case_ : List[str] ):
    """Assemble the InstructBlipConfig (and image size) for the given model name."""
    # COCO-finetuned checkpoints use 364x364 inputs, the rest 224x224
    SCREAMING_SNAKE_CASE__: int= 364 if '''coco''' in model_name else 224
    SCREAMING_SNAKE_CASE__: Optional[int]= InstructBlipVisionConfig(image_size=snake_case_ ).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        SCREAMING_SNAKE_CASE__: Any= TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        SCREAMING_SNAKE_CASE__: int= TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        SCREAMING_SNAKE_CASE__: Optional[Any]= LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=32_001 ).to_dict()
    elif "vicuna-13b" in model_name:
        SCREAMING_SNAKE_CASE__: Optional[Any]= LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=32_001 ).to_dict()
    else:
        raise ValueError('''Model name not supported''' )

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    SCREAMING_SNAKE_CASE__: Optional[Any]= InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
    SCREAMING_SNAKE_CASE__: int= InstructBlipConfig(vision_config=snake_case_ , text_config=snake_case_ , qformer_config=snake_case_ )

    return config, image_size


@torch.no_grad()
def A__ ( snake_case_ : Optional[Any] , snake_case_ : int=None , snake_case_ : Dict=False ):
    """Convert one LAVIS InstructBLIP checkpoint to HF format and verify parity.

    Loads the original LAVIS model, remaps its state dict into an
    InstructBlipForConditionalGeneration, checks that logits and generations
    match on a demo image, then optionally saves locally and pushes to the Hub.
    """
    # the Q-Former uses its own BERT-style tokenizer plus the extra [DEC] token
    SCREAMING_SNAKE_CASE__: Optional[Any]= AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
    qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )

    if "t5" in model_name:
        SCREAMING_SNAKE_CASE__: Any= TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        SCREAMING_SNAKE_CASE__: Optional[Any]= LlamaTokenizerFast.from_pretrained(
            '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
        tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )

    SCREAMING_SNAKE_CASE__: str= get_blipa_config(snake_case_ )
    SCREAMING_SNAKE_CASE__: int= InstructBlipForConditionalGeneration(snake_case_ ).eval()

    # map our model names onto LAVIS (name, model_type) pairs
    SCREAMING_SNAKE_CASE__: str= {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }

    SCREAMING_SNAKE_CASE__: Dict= model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''' )
    # original and HF model go on separate GPUs so both fit in memory
    SCREAMING_SNAKE_CASE__: Union[str, Any]= '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    SCREAMING_SNAKE_CASE__: Dict= '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    SCREAMING_SNAKE_CASE__: str= load_model_and_preprocess(
        name=snake_case_ , model_type=snake_case_ , is_eval=snake_case_ , device=snake_case_ )
    original_model.eval()
    print('''Done!''' )

    # update state dict keys
    SCREAMING_SNAKE_CASE__: Union[str, Any]= original_model.state_dict()
    SCREAMING_SNAKE_CASE__: List[Any]= create_rename_keys(snake_case_ )
    for src, dest in rename_keys:
        rename_key(snake_case_ , snake_case_ , snake_case_ )

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        SCREAMING_SNAKE_CASE__: List[str]= state_dict.pop(snake_case_ )
        if key.startswith('''Qformer.bert''' ):
            SCREAMING_SNAKE_CASE__: Dict= key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''self''' , '''attention''' )
        if "llm_proj" in key:
            SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''llm_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            SCREAMING_SNAKE_CASE__: List[Any]= key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''llm_model''' ):
            SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''llm_model''' , '''language_model''' )
        if key.startswith('''t5''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''t5''' , '''language''' )
        SCREAMING_SNAKE_CASE__: Tuple= val

    # read in qv biases
    read_in_q_v_bias(snake_case_ , snake_case_ )

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(snake_case_ , strict=snake_case_ )

    SCREAMING_SNAKE_CASE__: Dict= load_demo_image()
    SCREAMING_SNAKE_CASE__: Dict= '''What is unusual about this image?'''

    # create processor
    SCREAMING_SNAKE_CASE__: Optional[Any]= BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=snake_case_ , image_std=snake_case_ )
    SCREAMING_SNAKE_CASE__: Optional[int]= InstructBlipProcessor(
        image_processor=snake_case_ , tokenizer=snake_case_ , qformer_tokenizer=snake_case_ , )
    SCREAMING_SNAKE_CASE__: Tuple= processor(images=snake_case_ , text=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )

    # make sure processor creates exact same pixel values
    SCREAMING_SNAKE_CASE__: Optional[int]= vis_processors['''eval'''](snake_case_ ).unsqueeze(0 ).to(snake_case_ )
    SCREAMING_SNAKE_CASE__: str= inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , snake_case_ )

    original_model.to(snake_case_ )
    hf_model.to(snake_case_ )
    with torch.no_grad():
        # T5 models need labels to produce decoder logits; Vicuna does not
        if "vicuna" in model_name:
            SCREAMING_SNAKE_CASE__: List[str]= original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
            SCREAMING_SNAKE_CASE__: Tuple= hf_model(**snake_case_ ).logits
        else:
            SCREAMING_SNAKE_CASE__: List[Any]= original_model(
                {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
            SCREAMING_SNAKE_CASE__: List[Any]= tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(snake_case_ )
            # mask pad tokens out of the loss with -100
            SCREAMING_SNAKE_CASE__: str= label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= hf_model(**snake_case_ , labels=snake_case_ ).logits

    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )

    # assert values
    assert original_logits.shape == logits.shape
    SCREAMING_SNAKE_CASE__: Optional[int]= 1E-4 if '''vicuna''' in model_name else 1E-5
    assert torch.allclose(original_logits.to(logits.device ) , snake_case_ , atol=snake_case_ )
    print('''Looks ok!''' )

    print('''Generating with original model...''' )
    SCREAMING_SNAKE_CASE__: Optional[int]= original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )

    # important: we need to cast the weights of the HF model to the appropriate type
    print('''Generating with HF model...''' )
    SCREAMING_SNAKE_CASE__: Any= hf_model.generate(
        **snake_case_ , do_sample=snake_case_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        SCREAMING_SNAKE_CASE__: int= 2
    print('''Original generation:''' , snake_case_ )
    SCREAMING_SNAKE_CASE__: Tuple= processor.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
    SCREAMING_SNAKE_CASE__: Union[str, Any]= [text.strip() for text in output_text]
    print('''HF generation:''' , snake_case_ )

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(snake_case_ )
        hf_model.save_pretrained(snake_case_ )

    if push_to_hub:
        processor.push_to_hub(F'Salesforce/{model_name}' )
        hf_model.push_to_hub(F'Salesforce/{model_name}' )


if __name__ == "__main__":
    lowercase_ : Union[str, Any] = argparse.ArgumentParser()
    lowercase_ : Optional[int] = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='instructblip-flan-t5-xl',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )

    lowercase_ : Optional[Any] = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
64
"""Consistency checker for `__init__.py` files that use lazy `_import_structure` dicts.

Each init declares its public objects twice: once in the `_import_structure`
dict (used for lazy loading) and once under `if TYPE_CHECKING` (used by type
checkers). This script parses both halves and reports any object present in
one half but not the other, plus submodules missing from the root init.

Fix applied: in the original chunk every compiled regex was bound to the same
name and every function shared one name, so later definitions shadowed earlier
ones and the references below (`_re_test_backend`, `check_all_inits`, ...)
were undefined. Names are restored to one consistent, self-referencing set.
"""

import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend key of an `if not is_xxx_available()` line.

    Multiple backends on one line are sorted and joined with "_and_";
    returns None when the line is not a backend guard.
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend has a second (empty) group, so findall yields tuples; b[0] is the backend name.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse one init file.

    Returns a pair of dicts `(import_dict_objects, type_hint_objects)`, each
    mapping a backend key ("none" for unconditional objects) to the list of
    object names declared for it — or None when the file is not a lazy init
    (no `_import_structure` dict).
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                # obj[1:-1] strips the surrounding quotes.
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings (empty when consistent)."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        # Compare as sets so ordering differences don't matter, then list each direction's misses.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise ValueError listing every inconsistent init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of the library's submodules (folders with .py files and top-level modules)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; nested ones are covered by the folder pass above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Submodules intentionally absent from the root _import_structure.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise ValueError when a submodule is missing from the root init's _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
"""Convert an ALBERT TensorFlow checkpoint into a PyTorch checkpoint.

Fix applied: the conversion function was defined under one collapsed name but
invoked as `convert_tf_checkpoint_to_pytorch`, and `parser`/`args`/`config`/
`model` were assigned to collapsed names while referenced by their real names,
so running the script raised NameError. Names are restored consistently.
"""

import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Load a TF ALBERT checkpoint into `AlbertForPreTraining` and save it as PyTorch weights.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        albert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: Where to write the converted PyTorch state dict.
    """
    # Initialise PyTorch model skeleton from the JSON config.
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    # NOTE(review): argument order (model, config, checkpoint) restored from the
    # helper's conventional signature — confirm against load_tf_weights_in_albert.
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights only; the config travels separately).
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
636
"""Tests for the MegatronBert model family.

Fix applied: the original chunk collapsed every identifier — the tester class
was referenced as `MegatronBertModelTester` but defined under a throwaway name,
all methods were named `__magic_name__` while callers used real names, and the
module constants shared one name. A consistent, self-referencing naming is
restored; logic is unchanged.
"""

import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    """Builds tiny MegatronBert configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        # Random token ids plus optional mask / token types / labels, all tiny.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; shapes are checked on the last.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        # NSP is a binary decision.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two flag names below are restored from the upstream
    # layout (first True, then False) — confirm against the mixin's flag set.
    fx_compatible = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Pretraining heads need both MLM labels and the NSP label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Build a long tensor on the test device from a nested list of token ids."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        # Allow pointing at a local copy of the checkpoint via MYDIR.
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
678
0
"""Tests for the in-graph TF GPT-2 tokenizer against the Python tokenizer.

Fix applied: the original chunk collapsed identifiers — both module constants
were bound to one name while the code read `TOKENIZER_CHECKPOINTS` and
`TINY_MODEL_CHECKPOINT`, the `tf.Module` wrapper was referenced as
`ModelToSave` but defined under a throwaway name, and API names were
digit-mangled (`TFGPTaTokenizer`, `tf.intaa`). Consistent names are restored;
logic is unchanged.
"""

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        """Tokenizer + LM head packaged as one SavedModel with a string-in signature."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            # Padding positions are 0 after to_tensor(); mask everything else.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually only tested with from_pretrained in the tokenizer test,
    # so here we check the in-graph tokenizer against the reference Python implementation.
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int32) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            # Tokenization must survive tf.function compilation.
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled,
                # so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            # Round-trip the tokenizer through its Keras config.
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
49
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] 
= activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ 
: Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
678
0
'''simple docstring''' def lowerCamelCase__ ( ): UpperCAmelCase = [] UpperCAmelCase = 1 while len(SCREAMING_SNAKE_CASE ) < 1E6: constant.append(str(SCREAMING_SNAKE_CASE ) ) i += 1 UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[9_9999] ) * int(constant[99_9999] ) ) if __name__ == "__main__": print(solution())
447
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( _lowercase ): A__ = 'sew-d' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Optional[int] = feat_extract_norm lowerCAmelCase__ : str = feat_extract_activation lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : int = list(__UpperCAmelCase ) 
lowerCAmelCase__ : Any = list(__UpperCAmelCase ) lowerCAmelCase__ : int = conv_bias lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups lowerCAmelCase__ : int = len(self.conv_dim ) lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = squeeze_factor lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Any = position_buckets lowerCAmelCase__ : Optional[int] = share_att_key lowerCAmelCase__ : Tuple = relative_attention lowerCAmelCase__ : Optional[int] = norm_rel_ebd lowerCAmelCase__ : Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_dropout lowerCAmelCase__ : Union[str, Any] = attention_dropout lowerCAmelCase__ : str = activation_dropout lowerCAmelCase__ : List[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = feature_layer_norm_eps lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : Tuple = apply_spec_augment lowerCAmelCase__ : List[str] = mask_time_prob lowerCAmelCase__ : int = mask_time_length lowerCAmelCase__ : int = mask_time_min_masks 
lowerCAmelCase__ : Optional[int] = mask_feature_prob lowerCAmelCase__ : int = mask_feature_length lowerCAmelCase__ : int = mask_feature_min_masks # ctc loss lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction lowerCAmelCase__ : Any = ctc_zero_infinity # sequence classification lowerCAmelCase__ : Tuple = use_weighted_layer_sum lowerCAmelCase__ : Dict = classifier_proj_size @property def __magic_name__( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
678
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __A : def __init__(self : Optional[int] , __a : Optional[Any] , __a : Any=13 , __a : List[str]=7 , __a : List[str]=True , __a : Dict=True , __a : int=True , __a : Any=99 , __a : Union[str, Any]=32 , __a : Optional[Any]=5 , __a : Union[str, Any]=4 , __a : List[Any]=37 , __a : List[str]="gelu" , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : Any=512 , __a : List[str]=16 , __a : Union[str, Any]=2 , __a : Tuple=0.02 , __a : Any=3 , __a : Optional[Any]=4 , __a : Tuple=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope UpperCAmelCase_ = self.vocab_size - 1 def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if 
self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) UpperCAmelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _lowercase (self : str , __a : Any , __a : List[Any] , __a : Optional[Any] , __a : Any , *__a : int ): UpperCAmelCase_ = OpenAIGPTModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase ) UpperCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) UpperCAmelCase_ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : str , __a : Any , __a : List[Any] , __a : List[str] , __a : int , *__a : List[Any] ): UpperCAmelCase_ = OpenAIGPTLMHeadModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Union[str, Any] , __a : Tuple , __a : str , __a : int , __a : Optional[Any] , *__a : Tuple ): UpperCAmelCase_ = 
OpenAIGPTDoubleHeadsModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Union[str, Any] , __a : Tuple , __a : List[Any] , __a : Tuple , __a : Any , *__a : Any ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = OpenAIGPTForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase (self : int ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( UpperCAmelCase_ ) = config_and_inputs UpperCAmelCase_ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class __A ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): a__ : str = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a__ : Tuple = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a__ : Optional[int] = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _lowercase (self : int , __a : Optional[Any] , __a : int , __a : Optional[Any] , __a : Dict , __a : Tuple ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not 
have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _lowercase (self : List[str] , __a : Tuple , __a : Union[str, Any] , __a : List[Any]=False ): UpperCAmelCase_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": UpperCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase , ) UpperCAmelCase_ = inputs_dict['''labels'''] UpperCAmelCase_ = inputs_dict['''labels'''] UpperCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__UpperCAmelCase , ) UpperCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = OpenAIGPTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowercase (self : List[str] ): self.config_tester.run_common_tests() def _lowercase (self : Any ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__UpperCAmelCase ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__UpperCAmelCase ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__UpperCAmelCase ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__UpperCAmelCase ) @slow def _lowercase 
(self : Tuple ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = OpenAIGPTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch class __A ( unittest.TestCase ): @slow def _lowercase (self : List[Any] ): UpperCAmelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__UpperCAmelCase ) UpperCAmelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__UpperCAmelCase ) # the president is UpperCAmelCase_ = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the UpperCAmelCase_ = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase ) self.assertListEqual(output_ids[0].tolist() , __UpperCAmelCase )
78
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int 
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] 
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> 
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class __lowerCAmelCase(AbstractFileSystem):
    """Read-only fsspec filesystem over the files ("siblings") of a single Hub repo.

    The repo's file listing is taken from `repo_info.siblings` and cached in
    `self.dir_cache`; opening a file streams it over HTTP from the Hub.
    """

    # fsspec class attributes: paths have no root prefix, and the legacy
    # protocol name is "hf-legacy" because "hf://" is reserved for hffs.
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        """Store the repo metadata and auth token; the listing cache is lazy.

        Args:
            repo_info: Hub metadata for the repository (must be a `DatasetInfo`
                for `_open` to work — see the check there).
            token: optional auth token forwarded when downloading files.
        """
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None  # built lazily by _get_dirs()

    def _get_dirs(self):
        """Populate `self.dir_cache` with one entry per file and per parent dir."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                # Fix: the file entry itself must be stored under its path,
                # otherwise info()/ls() only ever see directories.
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every ancestor directory of the file ([:-1] drops
                # the root "." parent).
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        """Open `path` for reading by streaming it from the Hub.

        Raises:
            NotImplementedError: if `repo_info` is not a `DatasetInfo` — only
                dataset repositories are supported here.
        """
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """Return the cached entry for `path`, or raise FileNotFoundError."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of `path`.

        Returns full entry dicts when `detail` is True, otherwise a sorted
        list of names (the fsspec convention).
        """
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            # keep only entries whose parent is exactly `path`
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
283
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration for the Donut Swin vision encoder.

    Stores the hyper-parameters of the Swin Transformer backbone used by
    Donut. All arguments default to the `donut-base` architecture.
    """

    model_type = "donut-swin"
    # PretrainedConfig attribute aliases: map the generic names used elsewhere
    # in the library onto this config's Swin-specific attribute names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fix: store the hyper-parameters on the instance — the obfuscated
        # version assigned them to throwaway locals, so the config was empty.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
678
0
"""Tests for the Flax logits processors and warpers used during generation."""
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class lowerCamelCase__(unittest.TestCase):
    """Unit tests for each Flax logits processor and for their composition."""

    def _get_uniform_logits(self, batch_size, length):
        """Return a (batch_size, length) matrix of uniform probabilities."""
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id shold be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
607
"""Conversion between metric length units (metre and its decimal multiples)."""

# Full unit name -> SI symbol, used to normalize user input before the lookup
# into METRIC_CONVERSION below.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter): 1 <symbol> == 10 ** exponent metres.
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def __lowerCAmelCase(value: float, from_type: str, to_type: str) -> float:
    """Convert *value* between two metric length units.

    Unit names are case-insensitive and may be plural ("meters"); either the
    full name ("kilometer") or the symbol ("km") is accepted.

    Args:
        value: the quantity expressed in `from_type` units.
        from_type: source unit name or symbol.
        to_type: target unit name or symbol.

    Returns:
        The quantity expressed in `to_type` units.

    Raises:
        ValueError: if either unit is not a known metric length unit.

    >>> __lowerCAmelCase(4, "kilometer", "meter")
    4000
    >>> __lowerCAmelCase(1, "meter", "meter")
    1
    """
    # Normalize: lowercase and strip a plural "s", then map full names to
    # symbols; unknown strings pass through so bare symbols also work.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Net power of ten separating the two units.
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
678
0
"""Notebook-build configuration: install cell and formatter exclusions."""

# Shell/pip commands injected as the first cell of every generated notebook.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# Cells prepended to every notebook built from the documentation.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Doc-template placeholders mapped to fake names so the code formatter can
# process example snippets without choking on `{...}` placeholders.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
178
"""Tests for the zero-shot image classification pipeline."""
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub standing in for PIL.Image so the module imports without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class _lowerCAmelCase(unittest.TestCase):
    """End-to-end checks of `pipeline(...)` for zero-shot image classification."""

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # With the tiny model the three scores tie, so only the shape and
            # score values are stable — labels are matched loosely via ANY.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
678
0
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : Any = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class __magic_name__ ( _lowercase, unittest.TestCase ): lowercase : Optional[int] =PegasusTokenizer lowercase : int =PegasusTokenizerFast lowercase : Optional[Any] =True lowercase : List[Any] =True def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict: '''simple docstring''' return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def SCREAMING_SNAKE_CASE_ ( self : Any , **UpperCamelCase__ : Any ) -> Any: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : List[str] ) -> int: '''simple docstring''' return ("This is a test", "This is a test") def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = '''</s>''' UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]: '''simple docstring''' UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) 
self.assertEqual(len(__UpperCAmelCase ) , 11_03 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 11_03 ) def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) UpperCAmelCase = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int: '''simple docstring''' UpperCAmelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word UpperCAmelCase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' UpperCAmelCase = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple: '''simple docstring''' UpperCAmelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 10_24 UpperCAmelCase = '''To ensure a smooth flow of bank resolutions.''' UpperCAmelCase = 
[4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple: '''simple docstring''' UpperCAmelCase = ['''This is going to be way too long.''' * 1_50, '''short example'''] UpperCAmelCase = ['''not super long but more than 5 tokens''', '''tiny'''] UpperCAmelCase = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" ) UpperCAmelCase = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. 
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int:
    '''Integration test: pinned expected ids/attention masks for the arxiv checkpoint at a fixed revision.'''
    UpperCAmelCase = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=__UpperCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )


@require_sentencepiece
@require_tokenizers
class __magic_name__ ( _lowercase, unittest.TestCase ):
    # Second Pegasus tokenizer test class (BigBird-Pegasus fixture with
    # offset=0 and a plain [MASK] token).  Same machine-mangling caveat as
    # above: locals rebound to `UpperCAmelCase` while later reads use the
    # original upstream names.
    lowercase : Optional[Any] =PegasusTokenizer
    lowercase : str =PegasusTokenizerFast
    lowercase : Dict =True
    lowercase : Any =True

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
        '''Write a fixture PegasusTokenizer (offset=0, mask token "[MASK]") into tmpdirname.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token="[MASK]" )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
        '''Large arxiv checkpoint, fetched once and cached for the class.'''
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCamelCase__ : List[str] ) -> int:
        '''Build a tokenizer from the fixture directory.'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : Dict ) -> str:
        '''(input, output) text pair consumed by the shared tokenizer test suite.'''
        return ("This is a test", "This is a test")

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
        '''Python and Rust tokenizers agree on text containing [MASK] tokens.'''
        UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
        UpperCAmelCase = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    @require_torch
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
        '''Batched encoding pads/truncates sources to 4096 and targets to max_length=5.'''
        UpperCAmelCase = ['''This is going to be way too long.''' * 10_00, '''short example''']
        UpperCAmelCase = ['''not super long but more than 5 tokens''', '''tiny''']
        UpperCAmelCase = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" )
        UpperCAmelCase = self._large_tokenizer(
            text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" )
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__UpperCAmelCase ) == 2  # input_ids, attention_mask.

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
        '''Pinned ids for a reference sentence (parity with the original TF implementation).'''
        UpperCAmelCase = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        UpperCAmelCase = self._large_tokenizer(__UpperCAmelCase ).input_ids
        self.assertListEqual(
            __UpperCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
323
"""Minimum cut of a flow network via the Edmonds-Karp max-flow algorithm."""
from collections import deque

# Capacity matrix of the example network: test_graph[u][v] is the capacity of
# the directed edge u -> v (0 means no edge).  Referenced by the __main__ demo.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search over positive-capacity edges of ``graph``.

    Fills ``parent`` (in place) with the BFS tree rooted at ``s`` and returns
    True iff ``t`` is reachable from ``s``.
    """
    visited = [False] * len(graph)
    queue = deque([s])  # deque: O(1) pops from the left, unlike list.pop(0)
    visited[s] = True
    while queue:
        u = queue.popleft()
        for v, capacity in enumerate(graph[u]):
            if not visited[v] and capacity > 0:
                queue.append(v)
                visited[v] = True
                parent[v] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated edges ``(u, v)`` forming a minimum source/sink cut.

    Runs Edmonds-Karp until no augmenting path remains, then reports every
    edge whose residual capacity dropped to 0.  ``graph`` is modified in
    place; it ends up holding the residual capacities.
    """
    parent = [-1] * len(graph)
    res = []
    temp = [row[:] for row in graph]  # original capacities, for the final comparison
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        # Push the flow: decrease forward capacities, increase reverse ones.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges fully saturated by the max flow form the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
33
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
    # Test suite for the text-generation pipeline (PyTorch and TensorFlow).
    # NOTE(review): identifiers are machine-mangled — every method is
    # `__magic_name__`, every local is rebound to `lowerCAmelCase__`, while
    # later statements read the original upstream names (`text_generator`,
    # `outputs`, `pipe`, `EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS`, ...).
    A__ = MODEL_FOR_CAUSAL_LM_MAPPING
    A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def __magic_name__( self ):
        '''PyTorch path: deterministic greedy output of the tiny-ctrl model is pinned.'''
        lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ] , )
        lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            __UpperCAmelCase , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ] , )
        # return_tensors path: outputs carry token ids instead of decoded text.
        lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [
                {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                {'''generated_token_ids''': ANY(__UpperCAmelCase )},
            ] , )
        # Batched generation with an explicit pad token.
        lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
        lowerCAmelCase__ : List[Any] = '''<pad>'''
        lowerCAmelCase__ : List[Any] = text_generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , )
        self.assertEqual(
            __UpperCAmelCase , [
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                ],
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                ],
            ] , )

    @require_tf
    def __magic_name__( self ):
        '''TensorFlow path: deterministic greedy output of the tiny-ctrl model is pinned.'''
        lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ] , )
        lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ] , )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''Hook for the common pipeline tests: build the pipeline and example inputs.'''
        lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
        return text_generator, ["This is a test", "Another test"]

    def __magic_name__( self ):
        '''`stop_sequence` truncates the generated text at the stop string.'''
        lowerCAmelCase__ : Any = '''Hello I believe in'''
        lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''Shared behavior checks run for every registered causal-LM model.'''
        lowerCAmelCase__ : str = text_generator.model
        lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
        lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        # return_full_text=False strips the prompt from the output.
        lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        # Same flag supplied at pipeline-construction time.
        lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
        lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase , [
                [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            # Batched multi-sequence generation only works when padding is possible.
            lowerCAmelCase__ : List[str] = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
            self.assertEqual(
                __UpperCAmelCase , [
                    [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                    [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                ] , )
        # Mutually exclusive / invalid output-format combinations must raise.
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            lowerCAmelCase__ : str = text_generator('''''' )
            self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                lowerCAmelCase__ : List[str] = text_generator('''''' )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 1_0000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('''This is a test''' * 500 , max_new_tokens=20 )
            lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(__UpperCAmelCase ):
                text_generator(
                    '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__( self ):
        '''accelerate integration: device_map/torch_dtype placement and output stay correct.'''
        import torch

        # Classic `model_kwargs`
        lowerCAmelCase__ : List[str] = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCAmelCase__ : Any = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        lowerCAmelCase__ : Any = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def __magic_name__( self ):
        '''Smoke test: explicit device index + fp16 runs without error.'''
        import torch

        lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
        pipe('''This is a test''' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__( self ):
        '''Smoke test: device_map="auto" + fp16 with sampling runs without error.'''
        import torch

        lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
        pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )

    def __magic_name__( self ):
        '''A warning is logged only when BOTH max_length and max_new_tokens are given.'''
        lowerCAmelCase__ : int = '''Hello world'''
        lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`'''  # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
        self.assertIn(__UpperCAmelCase , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
        self.assertNotIn(__UpperCAmelCase , cl.out )
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
        self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
"""Convert a fairseq mBART checkpoint (model.pt) to MBartForConditionalGeneration."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop (in place) fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free Linear layer sharing the embedding's weight (tied lm_head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return the equivalent HF model.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.
        hf_config_path: HF config/checkpoint name used as the architecture template.
        finetuned: whether the checkpoint is fine-tuned (ties lm_head to the embeddings).
        mbart_50: whether the checkpoint is an mBART-50 model (uses ReLU when fine-tuned).
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    # Vocab size is read from the checkpoint itself so extended vocabularies work.
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        # Fine-tuned mBART-50 checkpoints use ReLU activations.
        mbart_config.activation_function = "relu"

    # fairseq shares encoder/decoder embeddings under "shared.weight" in HF layout.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    # NOTE: the flag --mbart_50 is exposed to argparse as args.mbart_50 (the
    # previous code read a non-existent args.mbart_aa attribute).
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
619
def base16_encode(data: bytes) -> str:
    """Encode *data* as an uppercase Base16 (hex) string per RFC 3548.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    >>> base16_encode(b'')
    ''
    """
    # Each byte becomes exactly two uppercase hex digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back to the original bytes.

    Raises ValueError on odd length or characters outside 0-9 / A-F.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    >>> base16_decode('')
    b''
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.'''
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF'''):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.'''
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
678
0
# Fast (Rust-backed) tokenizer for ALBERT, with slow SentencePiece fallback support.
# NOTE(review): module identifiers are machine-mangled — every constant is
# rebound to `lowercase_`, yet the class body reads the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...); restore before use.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    lowercase_ : List[str] = None  # slow tokenizer unavailable without sentencepiece

lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# Download locations of the vocab/tokenizer files for each released checkpoint.
lowercase_ : Optional[int] = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
    },
}

# Maximum sequence length (position embeddings) per checkpoint.
lowercase_ : List[Any] = {
    'albert-base-v1': 5_1_2,
    'albert-large-v1': 5_1_2,
    'albert-xlarge-v1': 5_1_2,
    'albert-xxlarge-v1': 5_1_2,
    'albert-base-v2': 5_1_2,
    'albert-large-v2': 5_1_2,
    'albert-xlarge-v2': 5_1_2,
    'albert-xxlarge-v2': 5_1_2,
}

lowercase_ : str = '▁'  # SentencePiece word-boundary marker


class _lowerCamelCase ( _lowercase ):
    # Harness attributes consumed by PreTrainedTokenizerFast machinery.
    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = AlbertTokenizer

    def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="[CLS]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="<unk>" , lowerCAmelCase="[SEP]" , lowerCAmelCase="<pad>" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , **lowerCAmelCase , ) -> Optional[Any]:
        '''Initialize the fast ALBERT tokenizer; forwards all special tokens to the base class.'''
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        SCREAMING_SNAKE_CASE__: str= (
            AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
            if isinstance(__UpperCAmelCase , __UpperCAmelCase )
            else mask_token
        )
        super().__init__(
            __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
        SCREAMING_SNAKE_CASE__: Optional[int]= do_lower_case
        SCREAMING_SNAKE_CASE__: int= remove_space
        SCREAMING_SNAKE_CASE__: List[str]= keep_accents
        SCREAMING_SNAKE_CASE__: Tuple= vocab_file
        SCREAMING_SNAKE_CASE__: Optional[Any]= False if not self.vocab_file else True

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple:
        '''Build model inputs with special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].'''
        SCREAMING_SNAKE_CASE__: Any= [self.sep_token_id]
        SCREAMING_SNAKE_CASE__: Optional[int]= [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> List[Any]:
        '''Token-type ids: 0 for the first segment (incl. specials), 1 for the second.'''
        SCREAMING_SNAKE_CASE__: Union[str, Any]= [self.sep_token_id]
        SCREAMING_SNAKE_CASE__: Optional[int]= [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Dict:
        '''Copy the slow-tokenizer SentencePiece model into save_directory.'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        SCREAMING_SNAKE_CASE__: List[str]= os.path.join(
            __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.vocab_file , __UpperCAmelCase )
        return (out_vocab_file,)
64
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Scheduler tests for DPMSolverSDEScheduler.

    Deobfuscated: the original block named every method ``__magic_name__`` (so
    only the last definition survived) and both class attributes ``A__`` (so
    ``scheduler_classes`` was clobbered by ``num_inference_steps``), and it
    inherited from the undefined name ``_lowercase`` instead of the imported
    ``SchedulerCommonTest``.
    """

    # Attributes read by SchedulerCommonTest helpers.
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full denoising loop and check the result against known sums/means."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Expected values differ per backend because the SDE noise sampler is
        # device-dependent.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop, but with prediction_type='v_prediction'."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed directly on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on-device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
678
0
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch A : Tuple = random.Random() def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ): '''simple docstring''' if rng is None: __lowerCAmelCase = global_rng __lowerCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __a , __a=7 , __a=4_00 , __a=20_00 , __a=10 , __a=1_60 , __a=8 , __a=0.0 , __a=40_00 , __a=False , __a=True , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = min_seq_length __lowerCAmelCase = max_seq_length __lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase = padding_value __lowerCAmelCase = sampling_rate __lowerCAmelCase = return_attention_mask __lowerCAmelCase = do_normalize __lowerCAmelCase = feature_size __lowerCAmelCase = chunk_length __lowerCAmelCase = hop_length def snake_case ( self ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def snake_case ( self , __a=False , __a=False ): def _flatten(__a ): return 
list(itertools.chain(*__UpperCAmelCase ) ) if equal_length: __lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase = [np.asarray(__UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _UpperCamelCase ( _lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Dict =WhisperFeatureExtractor if is_speech_available() else None def snake_case ( self ): __lowerCAmelCase = WhisperFeatureExtractionTester(self ) def snake_case ( self ): __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = feat_extract_first.save_pretrained(__UpperCAmelCase )[0] check_json_file_has_correct_format(__UpperCAmelCase ) __lowerCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCAmelCase ) __lowerCAmelCase = feat_extract_first.to_dict() __lowerCAmelCase = feat_extract_second.to_dict() __lowerCAmelCase = feat_extract_first.mel_filters __lowerCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = os.path.join(__UpperCAmelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCAmelCase ) __lowerCAmelCase = self.feature_extraction_class.from_json_file(__UpperCAmelCase ) __lowerCAmelCase = feat_extract_first.to_dict() __lowerCAmelCase = feat_extract_second.to_dict() __lowerCAmelCase = feat_extract_first.mel_filters __lowerCAmelCase = feat_extract_second.mel_filters 
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase ) ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCAmelCase = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase = feature_extractor(__UpperCAmelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features __lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) # Test batched __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCAmelCase = np.asarray(__UpperCAmelCase ) __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] __lowerCAmelCase = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs] __lowerCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs_truncated] __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def snake_case ( self ): import torch __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase = np.random.rand(1_00 , 32 ).astype(np.floataa ) __lowerCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def snake_case ( self , __a ): __lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with 
librispeech __lowerCAmelCase = ds.sort("id" ).select(range(__UpperCAmelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def snake_case ( self ): # fmt: off __lowerCAmelCase = torch.tensor( [ 0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1, 0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8, 0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4, -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4 ] ) # fmt: on __lowerCAmelCase = self._load_datasamples(1 ) __lowerCAmelCase = WhisperFeatureExtractor() __lowerCAmelCase = feature_extractor(__UpperCAmelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCAmelCase , atol=1e-4 ) ) def snake_case ( self ): __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase = self._load_datasamples(1 )[0] __lowerCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue __lowerCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCAmelCase )[0] self.assertTrue(np.all(np.mean(__UpperCAmelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCAmelCase ) - 1 ) < 1e-3 ) )
636
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    """Tests for the generation stopping criteria.

    Deobfuscated: the original block named all five test methods
    ``__magic_name__``, so only the last one existed at class-creation time
    and unittest discovered none of them.
    """

    def _get_tensors(self, length):
        """Build dummy (input_ids, scores) of the given sequence length."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdate the start so the time budget is already exhausted.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # Mismatched max_length should warn, not raise.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
678
0
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : def __init__( self : Any , _lowercase : int , _lowercase : Tuple=13 , _lowercase : Any=7 , _lowercase : int=True , _lowercase : Tuple=True , _lowercase : List[Any]=False , _lowercase : Optional[int]=True , _lowercase : Optional[Any]=99 , _lowercase : str=32 , _lowercase : int=5 , _lowercase : List[Any]=4 , _lowercase : Dict=37 , _lowercase : int="gelu" , _lowercase : Union[str, Any]=0.1 , _lowercase : Any=0.1 , _lowercase : Dict=5_12 , _lowercase : Any=16 , _lowercase : int=2 , _lowercase : Any=0.02 , _lowercase : List[str]=3 , _lowercase : str=4 , _lowercase : Optional[int]=None , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_input_mask __UpperCAmelCase = use_token_type_ids __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = type_vocab_size __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = initializer_range 
__UpperCAmelCase = num_labels __UpperCAmelCase = num_choices __UpperCAmelCase = scope def a ( self : Optional[int] ): __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None if self.use_token_type_ids: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self : Union[str, Any] ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def a ( self : Union[str, Any] , _lowercase : int , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Any , _lowercase : List[Any] , _lowercase : int ): __UpperCAmelCase = BioGptModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : int , _lowercase : Any , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : List[str] , ): __UpperCAmelCase = BioGptForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self : int , _lowercase : str , _lowercase : Optional[int] , _lowercase : Any , _lowercase : int , _lowercase : List[str] , *_lowercase : List[Any] ): __UpperCAmelCase = BioGptModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # create attention mask __UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase ) __UpperCAmelCase = self.seq_length // 2 __UpperCAmelCase = 0 # first forward pass __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __UpperCAmelCase = ids_tensor((1,) , __UpperCAmelCase ).item() + 1 __UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __UpperCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask __UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__UpperCAmelCase )] , dim=1 , ) # get two different outputs __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )['''last_hidden_state'''] __UpperCAmelCase = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase 
)['''last_hidden_state'''] # select random slice __UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() __UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def a ( self : Tuple , _lowercase : Dict , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Union[str, Any] , *_lowercase : str ): __UpperCAmelCase = BioGptModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() __UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase ) # first forward pass __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) __UpperCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )['''last_hidden_state'''] __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[ '''last_hidden_state''' ] # select random slice __UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def a ( self : Union[str, Any] , 
_lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : Tuple , *_lowercase : Any , _lowercase : Tuple=False ): __UpperCAmelCase = BioGptForCausalLM(__UpperCAmelCase ) model.to(__UpperCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() __UpperCAmelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def a ( self : int , _lowercase : List[str] , *_lowercase : Any ): __UpperCAmelCase = BioGptModel(__UpperCAmelCase ) __UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def a ( self : str , _lowercase : Any , _lowercase : Dict , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : List[str] , *_lowercase : str ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = BioGptForTokenClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self : List[Any] ): __UpperCAmelCase = self.prepare_config_and_inputs() ( __UpperCAmelCase ) = config_and_inputs __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): a__ : Any = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if 
is_torch_available() else () ) a__ : Any = (BioGptForCausalLM,) if is_torch_available() else () a__ : List[str] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) a__ : List[Any] = False def a ( self : Optional[int] ): __UpperCAmelCase = BioGptModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def a ( self : Tuple ): self.config_tester.run_common_tests() def a ( self : Optional[int] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def a ( self : List[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__UpperCAmelCase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__UpperCAmelCase , gradient_checkpointing=__UpperCAmelCase ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__UpperCAmelCase ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__UpperCAmelCase ) def a ( self : Tuple ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__UpperCAmelCase ) @slow def a ( self : 
List[Any] ): __UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__UpperCAmelCase ) __UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) __UpperCAmelCase = '''left''' # Define PAD Token = EOS Token = 50256 __UpperCAmelCase = tokenizer.eos_token __UpperCAmelCase = model.config.eos_token_id # use different length sentences to test batching __UpperCAmelCase = [ '''Hello, my dog is a little''', '''Today, I''', ] __UpperCAmelCase = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ) __UpperCAmelCase = inputs['''input_ids'''].to(__UpperCAmelCase ) __UpperCAmelCase = model.generate( input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''].to(__UpperCAmelCase ) , ) __UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__UpperCAmelCase ) __UpperCAmelCase = model.generate(input_ids=__UpperCAmelCase ) __UpperCAmelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() __UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__UpperCAmelCase ) __UpperCAmelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings ) __UpperCAmelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) __UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) __UpperCAmelCase = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def a ( self : str ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = BioGptModel.from_pretrained(__UpperCAmelCase ) 
self.assertIsNotNone(__UpperCAmelCase ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = 3 __UpperCAmelCase = input_dict['''input_ids'''] __UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase = BioGptForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a ( self : Union[str, Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = 3 __UpperCAmelCase = '''multi_label_classification''' __UpperCAmelCase = input_dict['''input_ids'''] __UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase = BioGptForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): @slow def a ( self : Tuple ): __UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) __UpperCAmelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) __UpperCAmelCase = model(__UpperCAmelCase )[0] __UpperCAmelCase = 4_23_84 __UpperCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) 
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def a ( self : Dict ): __UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) __UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__UpperCAmelCase ) torch.manual_seed(0 ) __UpperCAmelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__UpperCAmelCase ) __UpperCAmelCase = model.generate( **__UpperCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__UpperCAmelCase , ) __UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) __UpperCAmelCase = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
49
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
678
0
"""Lazy-import init for the LongT5 model family.

Follows the standard transformers pattern: ``_import_structure`` maps
submodule names to their public symbols, and at runtime the module is
replaced by a ``_LazyModule`` that imports submodules on first access.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

# Flax models are only registered when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
447
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite ``layers.0.weight``-style keys to ``layers_0.weight``.

    Flax addresses repeated submodules with ``name_index`` rather than
    PyTorch's ``name.index``, so every ``word.digits`` segment is joined
    with an underscore.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key to its Flax counterpart, reshaping if needed.

    Args:
        pt_tuple_key: The dotted PyTorch key, already split into a tuple.
        pt_tensor: The corresponding numpy tensor.
        random_flax_state_dict: Flattened randomly-initialized Flax params,
            used to decide which naming convention the target model expects.

    Returns:
        ``(flax_tuple_key, tensor)`` with the tensor transposed for conv or
        linear kernels where PyTorch and Flax layouts differ.
    """
    # conv norm or layernorm: a "bias" that maps onto a Flax "scale" entry
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into nested Flax parameters.

    Args:
        pt_state_dict: Mapping of PyTorch parameter names to tensors.
        flax_model: A Flax model providing ``init_weights`` for reference shapes.
        init_key: PRNG seed used to initialize the reference Flax params.

    Returns:
        A nested (unflattened) dict of ``jnp`` arrays keyed by Flax names.

    Raises:
        ValueError: if a converted tensor's shape does not match the
            randomly-initialized Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0
"""Convert an original LDM UNet checkpoint into the diffusers format."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Drop dotted segments from ``path``.

    Positive ``n_shave_prefix_segments`` removes leading segments; negative
    removes trailing ones.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM resnet parameter names to their diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` renaming dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM attention parameter names to their diffusers names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from ``old_checkpoint`` into ``checkpoint`` under new names.

    ``attention_paths_to_split`` maps fused qkv parameter names to the three
    per-projection target names; those tensors are split before assignment.
    ``additional_replacements`` is a list of ``{"old": ..., "new": ...}``
    global substring substitutions applied to every new path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Translate a full LDM UNet state dict into diffusers naming.

    Args:
        checkpoint: The original LDM state dict.
        config: The UNet config dict (uses ``num_res_blocks`` and
            ``num_head_channels``).

    Returns:
        A new state dict loadable by ``UNet2DModel``.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A block containing an "op" conv is a downsampler, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
78
"""Lazy-import init for the Nezha model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
678
0
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    Project Euler problem 25, with the conventional indexing
    F(1) = 1, F(2) = 1, F(3) = 2, ...

    Args:
        n: The required number of decimal digits (must be >= 1).

    Returns:
        The 1-based index of the first term whose digit count reaches ``n``.

    Raises:
        ValueError: if ``n`` is less than 1 (the loop would never terminate).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    if n == 1:
        # F(1) = 1 already has one digit.
        return 1

    previous, current = 1, 1
    index = 2
    # Digit counts grow by at most one per step, so >= triggers exactly at n.
    while len(str(current)) < n:
        previous, current = current, previous + current
        index += 1
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
283
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

lowerCAmelCase_ = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    """Staging tests for pushing Flax models to the Hub."""

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole test class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    """Return True when the two models' flattened params agree within 1e-4."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    """Tests for loading Flax models from subfolders, local and on the Hub."""

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the root without the subfolder must fail.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Tiny shard size forces the sharded-checkpoint code path.
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
678
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCAmelCase: Optional[Any] ={} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Tuple =["GPTSw3Tokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys lowerCAmelCase: Dict =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
607
from random import randint
from tempfile import TemporaryFile

import numpy as np

# Exported explicitly so the underscore-prefixed helpers survive ``import *``.
__all__ = ["_in_place_quick_sort", "_in_place_partition"]


def _in_place_quick_sort(a, start, end):
    """Sort ``a[start:end + 1]`` in place using random-pivot quicksort.

    Args:
        a: mutable sequence to sort (modified in place).
        start: index of the first element of the slice.
        end: index of the last element of the slice.

    Returns:
        The number of element comparisons performed while sorting.
    """
    count = 0
    if start < end:
        # Move a randomly chosen pivot value to the end; the partition step
        # expects the pivot at ``a[end]``.
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        # The pivot at index ``p`` is in its final place; recurse around it.
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto-partition ``a[start:end + 1]`` around a randomly chosen pivot.

    Returns:
        ``(pivot_index, count)`` where ``pivot_index`` is the pivot's final
        position and ``count`` the number of comparisons made.
    """
    count = 0
    # Pick (another) random pivot and park its value at the end, mirroring
    # the original algorithm's double random-pivot selection.
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # current value is smaller than the pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    # Put the pivot just after the last smaller element.
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # number of elements to be sorted (the original comment said 1000)
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # rewind so the same array can be read back
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is :")
print(z)
678
0
# NOTE(review): CLIP-style image processor (resize -> center-crop -> rescale ->
# normalize -> optional RGB conversion) using the OPENAI_CLIP mean/std.  The
# identifiers have been machine-mangled: every parameter of every method is
# named ``UpperCamelCase__`` (duplicate parameter names in one signature are a
# SyntaxError) and the bodies reference ``__UpperCAmelCase``, which no
# signature binds.  Code left byte-identical; the original parameter names
# must be restored before this module can even import.
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( _lowercase ): __UpperCAmelCase : Optional[Any] = ['''pixel_values'''] def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> Tuple: '''simple docstring''' super().__init__(**__UpperCAmelCase ) snake_case : Optional[Any] = size if size is not None else {'''shortest_edge''': 224} snake_case : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) snake_case : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} snake_case : Optional[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name="crop_size" ) snake_case : List[Any] = do_resize snake_case : int = size snake_case : str = resample snake_case : Tuple = do_center_crop snake_case : Union[str, Any] = crop_size snake_case : Dict = do_rescale snake_case : Tuple = rescale_factor snake_case : Union[str, Any] = do_normalize snake_case : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case : Union[str, Any] = image_std if image_std is not None else 
OPENAI_CLIP_STD snake_case : int = do_convert_rgb def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[Any]: '''simple docstring''' snake_case : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) snake_case : int = get_resize_output_image_size(__UpperCAmelCase , size=size["shortest_edge"] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> int: '''simple docstring''' snake_case : Union[str, Any] = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Optional[int]: '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Optional[Any]: '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> int: '''simple docstring''' snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize snake_case : Optional[int] = size if size is not None else self.size snake_case : Optional[int] = get_size_dict(__UpperCAmelCase , param_name="size" , default_to_square=__UpperCAmelCase ) snake_case : int = resample if resample is not None else self.resample snake_case : int = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case : Tuple = crop_size if crop_size is not None else self.crop_size snake_case : Union[str, Any] = get_size_dict(__UpperCAmelCase , param_name="crop_size" , default_to_square=__UpperCAmelCase ) snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor 
snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize snake_case : Optional[Any] = image_mean if image_mean is not None else self.image_mean snake_case : Optional[Any] = image_std if image_std is not None else self.image_std snake_case : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case : Any = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case : Optional[Any] = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
snake_case : Union[str, Any] = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: snake_case : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: snake_case : Dict = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: snake_case : Dict = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: snake_case : Optional[Any] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] snake_case : Tuple = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] snake_case : Tuple = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
178
# NOTE(review): pytest suite for the datasets library's Parquet reader/writer
# (ParquetDatasetReader / ParquetDatasetWriter / get_writer_batch_size).  The
# identifiers are machine-mangled: every test function repeats the parameter
# name ``UpperCamelCase`` (duplicate parameter names in one ``def`` are a
# SyntaxError), so pytest cannot even import this file as-is.  Code left
# byte-identical; only this note added.  Fixtures such as ``parquet_path``
# and ``shared_datadir`` are presumably supplied by a conftest — TODO confirm.
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} 
lowerCAmelCase__ : str = features.copy() if features else default_expected_features lowerCAmelCase__ : List[Any] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: if issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = parquet_path elif issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = [parquet_path] lowerCAmelCase__ : int = tmp_path / '''cache''' lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str: assert isinstance(UpperCamelCase , UpperCamelCase ) for split in splits: lowerCAmelCase__ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, 
expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features lowerCAmelCase__ : Optional[int] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: if 
split: lowerCAmelCase__ : Tuple = {split: parquet_path} else: lowerCAmelCase__ : int = '''train''' lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ : Optional[int] = tmp_path / '''cache''' lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ : int = pf.read() assert dataset.data.table == output_table def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) lowerCAmelCase__ : Dict = {'''image''': [image_path]} lowerCAmelCase__ : int = Features({'''image''': Image()} ) lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase ) lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), 
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: assert get_writer_batch_size(UpperCamelCase ) == expected
678
0
# NOTE(review): unit tests for accelerate's weight-offloading helpers
# (offload_state_dict / offload_weight / load_offloaded_weight /
# OffloadedWeightsLoader / extract_submodules_state_dict).  The method bodies
# reference ``__UpperCAmelCase``, a name no signature binds (parameters are
# ``UpperCamelCase__``), so every test would raise NameError at run time.
# Code left byte-identical pending restoration of the original identifiers.
import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class __magic_name__ ( nn.Module ): def __init__( self : List[str] ) -> int: '''simple docstring''' super().__init__() UpperCAmelCase = nn.Linear(3 , 4 ) UpperCAmelCase = nn.BatchNormad(4 ) UpperCAmelCase = nn.Linear(4 , 5 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : Dict ) -> Dict: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(__UpperCAmelCase ) ) ) class __magic_name__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int: '''simple docstring''' UpperCAmelCase = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , model.state_dict() ) UpperCAmelCase = os.path.join(__UpperCAmelCase , "index.json" ) self.assertTrue(os.path.isfile(__UpperCAmelCase ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: UpperCAmelCase = os.path.join(__UpperCAmelCase , F'{key}.dat' ) self.assertTrue(os.path.isfile(__UpperCAmelCase ) ) # TODO: add tests on the fact weights are properly loaded def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int: '''simple docstring''' UpperCAmelCase = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: UpperCAmelCase = torch.randn(2 , 3 , dtype=__UpperCAmelCase ) with TemporaryDirectory() as tmp_dir: UpperCAmelCase = offload_weight(__UpperCAmelCase , "weight" , __UpperCAmelCase , {} ) UpperCAmelCase = os.path.join(__UpperCAmelCase , "weight.dat" ) self.assertTrue(os.path.isfile(__UpperCAmelCase ) ) self.assertDictEqual(__UpperCAmelCase , {"weight": {"shape": [2, 3], "dtype": str(__UpperCAmelCase ).split("." 
)[1]}} ) UpperCAmelCase = load_offloaded_weight(__UpperCAmelCase , index["weight"] ) self.assertTrue(torch.equal(__UpperCAmelCase , __UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = ModelForTest() UpperCAmelCase = model.state_dict() UpperCAmelCase = {k: v for k, v in state_dict.items() if '''linear2''' not in k} UpperCAmelCase = {k: v for k, v in state_dict.items() if '''linear2''' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key] ) ) UpperCAmelCase = {k: v for k, v in state_dict.items() if '''weight''' in k} UpperCAmelCase = {k: v for k, v in state_dict.items() if '''weight''' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(__UpperCAmelCase , __UpperCAmelCase ) # Duplicates are removed UpperCAmelCase = OffloadedWeightsLoader(state_dict=__UpperCAmelCase , save_folder=__UpperCAmelCase ) # Every key is there with the right value self.assertEqual(sorted(__UpperCAmelCase ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(__UpperCAmelCase , weight_map[key] ) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]: '''simple 
docstring''' UpperCAmelCase = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2} UpperCAmelCase = extract_submodules_state_dict(__UpperCAmelCase , ["a.1", "a.2"] ) self.assertDictEqual(__UpperCAmelCase , {"a.1": 0, "a.2": 2} ) UpperCAmelCase = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2} UpperCAmelCase = extract_submodules_state_dict(__UpperCAmelCase , ["a.1", "a.2"] ) self.assertDictEqual(__UpperCAmelCase , {"a.1.a": 0, "a.2.a": 2} )
323
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Pretrained-checkpoint config URLs.  The mangled original assigned this dict
# to the same name as the logger, clobbering it.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class _lowerCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for FocalNet models/backbones.

    Holds image/patch geometry, per-stage depths and focal-modulation
    hyper-parameters, plus the backbone output-feature selection.  The
    mangled original declared every ``__init__`` parameter under the same
    duplicated name (a SyntaxError) and inherited from the undefined,
    duplicated base ``_lowercase``; names are reconstructed here from the
    attribute-assignment order.
    """

    # Preserved mangled attribute; ``model_type`` is what PretrainedConfig
    # serialization actually keys on.
    A__ = "focalnet"
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # list defaults mirror upstream; treat as read-only
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, preceded by the patch-embedding stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Validate/align the requested backbone outputs against the stage names.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
# NOTE(review): image-processor tests for Donut (a tester config class plus a
# DonutImageProcessor test case covering PIL / numpy / torch inputs).  The
# tester ``__init__`` repeats the parameter name ``_a`` for every argument —
# duplicate parameter names are a SyntaxError — and bodies reference the
# unbound name ``__UpperCAmelCase``.  Code left byte-identical; the original
# identifier names must be restored before this file can import or run.
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __magic_name__ (unittest.TestCase ): '''simple docstring''' def __init__( self:int , _a:List[str] , _a:Tuple=7 , _a:Tuple=3 , _a:Tuple=18 , _a:Union[str, Any]=30 , _a:Any=4_00 , _a:Optional[int]=True , _a:Dict=None , _a:Optional[Any]=True , _a:Any=False , _a:List[Any]=True , _a:Optional[Any]=True , _a:str=[0.5, 0.5, 0.5] , _a:str=[0.5, 0.5, 0.5] , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size if size is not None else {'''height''': 18, '''width''': 20} snake_case__ = do_thumbnail snake_case__ = do_align_axis snake_case__ = do_pad snake_case__ = do_normalize snake_case__ = image_mean snake_case__ = image_std def SCREAMING_SNAKE_CASE__ ( self:Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __magic_name__ (_lowercase ,unittest.TestCase ): '''simple docstring''' __lowercase : Union[str, Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self:int ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_thumbnail''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_pad''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) ) def SCREAMING_SNAKE_CASE__ ( self:Tuple ): snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): pass @is_flaky() def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case__ = image_processing(__UpperCAmelCase , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE__ ( self:Any ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case__ = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE__ ( self:Dict ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) 
# Test batched snake_case__ = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
33
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the
p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this
one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x
increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets
that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """`datasets.Metric` wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
        # Metric metadata plus the expected input schema (two float columns).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return the Pearson correlation of predictions vs. references.

        When ``return_pvalue`` is True the two-sided p-value is included
        under the ``"p-value"`` key.  Pearson correlation is symmetric in
        its two arguments, so argument order does not affect the result.
        """
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
"""Count "hybrid integers" p**q * q**p (p, q distinct primes) not exceeding base**degree.

Works in log2 space: p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base).
"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    Returns an empty list when ``max_number`` < 2 (nothing to sieve).
    """
    if max_number < 2:
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Step by i (not max_number): mark every multiple of i composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree.

    Any prime factor of a qualifying hybrid integer is at most
    degree*log2(base), so we sieve up to that bound and sweep prime pairs
    with two pointers: for each left prime, every right prime at or below
    the current `right` index forms a valid pair.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink `right` until the pair (left, right) fits under the bound;
        # `right > left` guards against walking past the left pointer.
        while (
            right > left
            and prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
619
from manim import *


# NOTE(review): this Scene appears to be one of the HF Accelerate documentation
# animations (CPU/GPU/Model/Disk memory blocks).  Identifier obfuscation has
# collapsed many distinct locals into `lowerCAmelCase__` and call arguments into
# `__UpperCAmelCase`, so several references below (`cpu`, `gpu`, `model`,
# `checkpoint`, `key`, `disk`, `target`, `animations`, `cpu_left_col_base`,
# `model_cpu_arr`, `ckpt_arr`, ...) have no matching definition — the original
# variable names need to be restored before this can run.  Comments below
# describe intent only; TODO confirm against the upstream animation source.
class _lowerCAmelCase ( _lowercase ):

    def __magic_name__( self ):
        # Base building blocks: a memory cell, a smaller "meta" cell, and a fill square.
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six memory cells with a label, placed on the left.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: four memory cells with a label.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: a row of six memory cells with a label, on the right.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # For each model cell, create a small highlight target anchored next to the
        # CPU cells (i == 0 starts at the bottom-left corner; later ones chain on).
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
        # Loaded checkpoint: six cells above the model; each cell gets a filled
        # copy, mirrored onto the CPU columns (left column first, then right).
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Legend: a key box with colored bullet entries for "Empty Model" / "Checkpoint".
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Narration step 1 + Disk group (two columns of six meta cells).
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
        # Animate checkpoint cells shrinking onto the disk column.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Narration step 2, then fade everything out and hold the final frame.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
class Node:
    """A binary-search-tree node holding a single value."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert ``val`` into the subtree rooted at this node.

        Smaller values go left, larger values go right; an equal value is
        ignored, so duplicates collapse to a single occurrence.  Comparing
        against ``self.val`` directly (instead of the original truthiness
        check) keeps falsy values such as 0 from being overwritten.
        """
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)


def inorder(root, res):
    """Recursively append the subtree's values to ``res`` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by building a BST and reading it back in order.

    Duplicates are dropped (a property of the insert above).  An empty
    input is returned unchanged.
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
64
"""Consistency checker for lazy-init files: verifies that each ``__init__.py``'s
``_import_structure`` and its ``TYPE_CHECKING`` branch declare the same objects,
and that every submodule is registered in the main init."""
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(R"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(R"""^\s*else:""")


def find_backend(line):
    """Return the normalized backend name(s) guarded by an `if not is_xxx_available()` line, or None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and return two dicts mapping backend name -> list of objects:
    one for the ``_import_structure`` half and one for the ``TYPE_CHECKING`` half.
    Returns None for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init and return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors


def check_all_inits():
    """Walk the source tree and raise if any lazy init's two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of all first-level submodules under PATH_TO_TRANSFORMERS."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise if a submodule is missing from the keys of the main init's ``_import_structure``."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(F"""- {module}""" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            F"""{list_of_modules}\n"""
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
"""Donut Swin Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration for a Donut Swin encoder.

    Stores every hyper-parameter of the Swin backbone used by Donut.
    ``attribute_map`` lets generic code read ``num_attention_heads`` /
    ``num_hidden_layers`` even though the Swin-style names are
    ``num_heads`` / ``num_layers``.
    """

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
636
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : Union[str, Any] = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = embedding_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size 
lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Any = scope def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : 
Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( 
lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[int] = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A__ = True # test_resize_embeddings = False A__ = False def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def __magic_name__( self ): lowerCAmelCase__ : str = MegatronBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): 
self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: return torch.tensor( UpperCamelCase , dtype=torch.long , device=UpperCamelCase , ) lowerCAmelCase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__( self ): lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in 
os.environ: lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.half() lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
678
0
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :int ): __UpperCAmelCase = F'''{sampling_rate}''' __UpperCAmelCase = '''1''' __UpperCAmelCase = '''f32le''' __UpperCAmelCase = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(snake_case_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: __UpperCAmelCase = ffmpeg_process.communicate(snake_case_ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error __UpperCAmelCase = output_stream[0] __UpperCAmelCase = np.frombuffer(snake_case_ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def lowercase__ ( snake_case_ :int , snake_case_ :int , snake_case_ :Optional[Any] = "f32le" , ): __UpperCAmelCase = F'''{sampling_rate}''' __UpperCAmelCase = '''1''' if format_for_conversion == "s16le": __UpperCAmelCase = 2 elif format_for_conversion == "f32le": __UpperCAmelCase = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) __UpperCAmelCase = platform.system() if system == "Linux": __UpperCAmelCase = '''alsa''' __UpperCAmelCase = '''default''' elif system == "Darwin": __UpperCAmelCase = '''avfoundation''' __UpperCAmelCase = ''':0''' elif system == "Windows": __UpperCAmelCase = '''dshow''' __UpperCAmelCase = '''default''' __UpperCAmelCase = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] __UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample __UpperCAmelCase = _ffmpeg_stream(snake_case_ , snake_case_ ) for item in iterator: yield item def lowercase__ ( snake_case_ :List[str] , snake_case_ :str , snake_case_ :Optional[int] = None , snake_case_ :str = None , snake_case_ :Any = "f32le" , ): if stream_chunk_s is not None: __UpperCAmelCase = stream_chunk_s else: __UpperCAmelCase = chunk_length_s __UpperCAmelCase = ffmpeg_microphone(snake_case_ , snake_case_ , format_for_conversion=snake_case_ ) if format_for_conversion == "s16le": __UpperCAmelCase = np.intaa __UpperCAmelCase = 2 elif format_for_conversion == "f32le": __UpperCAmelCase = np.floataa __UpperCAmelCase = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: __UpperCAmelCase = chunk_length_s / 6 __UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(snake_case_ , (int, float) ): __UpperCAmelCase = [stride_length_s, stride_length_s] __UpperCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample __UpperCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample __UpperCAmelCase = datetime.datetime.now() __UpperCAmelCase = datetime.timedelta(seconds=snake_case_ ) for item in chunk_bytes_iter(snake_case_ , snake_case_ , stride=(stride_left, stride_right) , stream=snake_case_ ): # Put everything back in numpy scale __UpperCAmelCase = np.frombuffer(item['''raw'''] , dtype=snake_case_ ) __UpperCAmelCase = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) __UpperCAmelCase = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[Any] , snake_case_ :Any , snake_case_ :Dict = False ): __UpperCAmelCase = b'''''' __UpperCAmelCase = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) __UpperCAmelCase = 0 for raw in iterator: acc += raw if stream and len(snake_case_ ) < chunk_len: __UpperCAmelCase = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(snake_case_ ) >= chunk_len: # We are flushing the accumulator __UpperCAmelCase = (_stride_left, stride_right) __UpperCAmelCase = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: __UpperCAmelCase = False yield item __UpperCAmelCase = stride_left __UpperCAmelCase = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(snake_case_ ) > stride_left: __UpperCAmelCase = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: __UpperCAmelCase = False yield item def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 2**24 # 16Mo try: with subprocess.Popen(snake_case_ , stdout=subprocess.PIPE , bufsize=snake_case_ ) as ffmpeg_process: while True: __UpperCAmelCase = ffmpeg_process.stdout.read(snake_case_ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
49
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] 
= activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ 
: Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
678
0
# NOTE(review): fine-tuning script for multiple-choice tasks using the HF Trainer
# (argument dataclasses + main()). Identifiers appear machine-mangled: the name
# `lowerCamelCase__` is defined three separate times (accuracy helper, main, _mp_fn),
# so later definitions shadow earlier ones, and the accuracy helper references
# `preds`/`labels` that are not among its parameters. Verify against the upstream
# examples/multiple-choice/run_multiple_choice.py before executing.
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _a : Dict = logging.getLogger(__name__) def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): return (preds == labels).mean() @dataclass class lowercase_ : '''simple docstring''' __lowerCAmelCase : List[str] = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __lowerCAmelCase : List[Any] = field( default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __lowerCAmelCase : Union[str, Any] = field( default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __lowerCAmelCase : Tuple = field( default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class lowercase_ : '''simple docstring''' __lowerCAmelCase : Union[str, Any] = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) __lowerCAmelCase : Optional[Any] = field(metadata={"help": "Should contain the data files for the task."} ) __lowerCAmelCase : Union[str, Any] = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded."
) } , ) __lowerCAmelCase : Optional[int] = field( default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowerCamelCase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) try: UpperCAmelCase = processors[data_args.task_name]() UpperCAmelCase = processor.get_labels() UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that
only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE , p.label_ids )} # Data collator UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCAmelCase = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) results.update(SCREAMING_SNAKE_CASE ) return results def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
447
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for a SEW-D speech model.

    NOTE(review): the previous (machine-mangled) version declared every
    ``__init__`` parameter with the same name (a SyntaxError), bound the
    arguments to throwaway locals instead of instance attributes, and
    inherited from an undefined name ``_lowercase``.  This rewrite restores
    distinct parameter names matching the upstream ``SEWDConfig``, stores
    them on ``self``, and inherits from the imported ``PretrainedConfig``.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder / DeBERTa-style attention parameters
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # copy so callers' sequences are never aliased/mutated
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # overall downsampling factor of the convolutional feature extractor
        return functools.reduce(operator.mul, self.conv_stride, 1)
678
0
"""Max pooling and average pooling over square matrices, using NumPy."""
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over *arr* and keep the window maximum.

    Args:
        arr: square 2D array-like of numbers.
        size: pooling window edge length.
        stride: step between consecutive windows (both axes).

    Returns:
        The pooled matrix of shape ``((n - size) // stride + 1,) * 2``.

    Raises:
        ValueError: if *arr* is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    # number of whole windows that fit along each axis
    pooled_dim = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((pooled_dim, pooled_dim))
    i = mat_i = 0
    while i + size <= arr.shape[0]:
        j = mat_j = 0
        while j + size <= arr.shape[1]:
            # write the window maximum into the output cell (the original
            # obfuscated code dropped this value into a throwaway local)
            pooled[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            j += stride
            mat_j += 1
        i += stride
        mat_i += 1
    return pooled


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over *arr* and keep the (truncated) window mean.

    Same contract as :func:`maxpooling`; each output cell is
    ``int(np.average(window))``, i.e. the mean truncated toward zero.

    Raises:
        ValueError: if *arr* is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    pooled_dim = (arr.shape[0] - size) // stride + 1
    pooled = np.zeros((pooled_dim, pooled_dim))
    i = mat_i = 0
    while i + size <= arr.shape[0]:
        j = mat_j = 0
        while j + size <= arr.shape[1]:
            pooled[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            j += stride
            mat_j += 1
        i += stride
        mat_i += 1
    return pooled


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # PIL is only needed for the interactive demo, so import it lazily here.
    from PIL import Image

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
78
# NOTE(review): unit tests for PegasusTokenizer / PegasusTokenizerFast (slow vs.
# fast parity, special-token handling, batch shapes). Identifiers are machine-
# mangled: every test method is named __magic_name__ (later definitions shadow
# earlier ones within each class, so a test runner would only collect the last),
# and several signatures repeat the parameter name __UpperCAmelCase, which is a
# SyntaxError in real Python. Verify against the upstream
# tests/models/pegasus/test_tokenization_pegasus.py before executing.
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad>
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
import argparse


# Path of the doc-site JS file that records the stable version and the
# version drop-down mapping.
SCREAMING_SNAKE_CASE: str = "docs/source/_static/js/custom.js"


def update_custom_js(version: str, js_file: str = SCREAMING_SNAKE_CASE) -> None:
    """Rewrite *js_file* so that *version* becomes the stable doc version.

    The file must contain a line starting with ``const stableVersion =`` and a
    ``const versionMapping = {`` block; the new version is appended as the last
    entry of that mapping.

    Args:
        version: release version string, without the leading ``v``.
        js_file: path of the custom.js file to update.  Defaults to the
            repository doc layout; exposed as a parameter so the function can
            be exercised on a temporary file.

    Fixes over the previous revision: the function was defined under a mangled
    name but invoked as ``update_custom_js`` (NameError), referenced an
    undefined free variable ``version``, and opened its *version* argument as
    if it were the file path.
    """
    with open(js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the closing brace of the mapping
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version just before the closing brace
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
283
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for a Donut Swin vision encoder.

    NOTE(review): the previous (machine-mangled) version declared every
    ``__init__`` parameter with the same name (a SyntaxError), assigned both
    ``model_type`` and ``attribute_map`` to the same class attribute so the
    first was lost, bound the arguments to throwaway locals instead of
    instance attributes, and inherited from an undefined name ``_lowercase``.
    This rewrite restores the upstream ``DonutSwinConfig`` structure.
    """

    model_type = "donut-swin"

    # external names -> stored attribute names (Swin convention)
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = list(depths)
        self.num_layers = len(self.depths)
        self.num_heads = list(num_heads)
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(self.depths) - 1))
678
0
# NOTE(review): unit tests for diffusers' DPMSolverSDEScheduler (config sweeps
# plus full denoising loops checking result sums/means per device). Identifiers
# are machine-mangled: every test method is named _UpperCAmelCase, so each later
# definition shadows the previous one and a test runner would only collect the
# last method. Verify against the upstream
# tests/schedulers/test_scheduler_dpm_sde.py before executing.
"""simple docstring""" import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCamelCase__ ( _lowercase ): __UpperCAmelCase = (DPMSolverSDEScheduler,) __UpperCAmelCase = 10 def _UpperCAmelCase ( self , **snake_case ) -> List[str]: """simple docstring""" lowercase : Dict = { '''num_train_timesteps''': 1_1_0_0, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" lowercase : List[Any] = self.scheduler_classes[0] lowercase : str = self.get_scheduler_config() lowercase : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase : Union[str, Any] = self.dummy_model() lowercase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowercase
: int = model(__UpperCAmelCase , __UpperCAmelCase ) lowercase : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase : List[str] = output.prev_sample lowercase : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowercase : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2 assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2 assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3 def _UpperCAmelCase ( self ) -> int: """simple docstring""" lowercase : Dict = self.scheduler_classes[0] lowercase : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowercase : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase : Optional[Any] = self.dummy_model() lowercase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowercase : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowercase : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowercase : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase : int = output.prev_sample lowercase : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowercase : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2 assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2 assert
abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3 else: assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2 assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3 def _UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" lowercase : Optional[int] = self.scheduler_classes[0] lowercase : Tuple = self.get_scheduler_config() lowercase : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowercase : Dict = self.dummy_model() lowercase : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowercase : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowercase : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowercase : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase : Optional[int] = output.prev_sample lowercase : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowercase : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2 assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2 assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3 def _UpperCAmelCase ( self ) -> List[Any]: """simple docstring""" lowercase : Optional[Any] = self.scheduler_classes[0] lowercase : Dict = self.get_scheduler_config() lowercase : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowercase : List[Any] = self.dummy_model() lowercase : int =
self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowercase : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowercase : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowercase : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowercase : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowercase : Dict = output.prev_sample lowercase : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowercase : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2 else: assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
607
"""Convert between metric length units (meter ... yottametre).

Each unit is reduced to its power-of-ten exponent relative to the meter;
the value is then scaled by the exponent difference.
"""

# Map full unit names to their SI symbols.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter): 1 <symbol> == 10**exponent meters.
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def __lowerCAmelCase(value: float, from_type: str, to_type: str) -> float:
    """Convert *value* from *from_type* length units to *to_type*.

    Unit names are case-insensitive and may carry a plural 's'
    ("meters" == "meter"); lowercase symbols such as "m"/"km" also pass
    through the name lookup unchanged.

    Raises:
        ValueError: if either unit is not recognised.
    """
    # Normalise: lowercase, drop plural 's' characters, then map a full
    # name to its symbol (unknown strings fall through via the get default).
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # 1 from-unit == 10**(from_exponent - to_exponent) to-units.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
678
0
"""simple docstring""" from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class _lowerCAmelCase ( _lowercase ): def lowerCamelCase ( self ) -> int: '''simple docstring''' snake_case : List[Any] = SMALL_MODEL_IDENTIFIER snake_case : Optional[int] = '''pt''' snake_case : Dict = '''tf''' def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' snake_case : str = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self , UpperCamelCase__ ) -> str: '''simple docstring''' snake_case : Optional[Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase ) model_tf.save_pretrained(__UpperCAmelCase ) def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : int = '''mock_framework''' # Framework provided - return whatever the user provides snake_case : int = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__UpperCAmelCase ) snake_case : Tuple = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__UpperCAmelCase ) snake_case : int = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: 
self._setup_pt_ckpt(__UpperCAmelCase ) snake_case : Dict = FeaturesManager.determine_framework(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__UpperCAmelCase ) snake_case : Union[str, Any] = FeaturesManager.determine_framework(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__UpperCAmelCase ): snake_case : Any = FeaturesManager.determine_framework(__UpperCAmelCase ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : List[str] = MagicMock(return_value=__UpperCAmelCase ) with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ): snake_case : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__UpperCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow snake_case : str = MagicMock(return_value=__UpperCAmelCase ) with patch("transformers.onnx.features.is_torch_available" , __UpperCAmelCase ): snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__UpperCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch snake_case : int = MagicMock(return_value=__UpperCAmelCase ) snake_case : int = MagicMock(return_value=__UpperCAmelCase ) with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ), patch( "transformers.onnx.features.is_torch_available" , __UpperCAmelCase ): snake_case : List[Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__UpperCAmelCase , self.framework_pt ) # Both not in environment -> raise error snake_case : Tuple = MagicMock(return_value=__UpperCAmelCase ) snake_case : Optional[Any] = MagicMock(return_value=__UpperCAmelCase ) with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ), patch( 
"transformers.onnx.features.is_torch_available" , __UpperCAmelCase ): with self.assertRaises(__UpperCAmelCase ): snake_case : int = FeaturesManager.determine_framework(self.test_model )
178
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': 
'''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': 
'''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
678
0
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __magic_name__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> int: '''simple docstring''' UpperCAmelCase = self.dummy_uncond_unet UpperCAmelCase = PNDMScheduler() UpperCAmelCase = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type="numpy" ).images UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type="numpy" , return_dict=__UpperCAmelCase )[0] UpperCAmelCase = image[0, -3:, -3:, -1] UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __magic_name__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple: '''simple docstring''' UpperCAmelCase = '''google/ddpm-cifar10-32''' UpperCAmelCase = UNetaDModel.from_pretrained(__UpperCAmelCase ) UpperCAmelCase = PNDMScheduler() UpperCAmelCase = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase 
) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pndm(generator=__UpperCAmelCase , output_type="numpy" ).images UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
323
# Capacity matrix of the example flow network (classic CLRS graph);
# entry [u][v] is the capacity of edge u -> v, 0 meaning "no edge".
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search for an augmenting path from *s* to *t*.

    Fills *parent* so the path can be walked back from the sink.
    Returns True iff the sink is reachable through positive-capacity edges.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated edges of a minimum source-sink cut.

    Runs Edmonds-Karp (BFS-based Ford-Fulkerson), mutating *graph* into its
    residual network, then reports every edge whose original capacity was
    positive but whose residual capacity dropped to zero.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # record original capacities, copy
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by bfs().
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (forward and reverse).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> List[Any]: snake_case__ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } snake_case__ = input_paths_and_base_extractors[compression_format] if input_path is None: snake_case__ = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__lowerCAmelCase ) assert base_extractor.is_extractable(__lowerCAmelCase ) snake_case__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(__lowerCAmelCase , __lowerCAmelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name snake_case__ = file_path.read_text(encoding='''utf-8''' ) else: snake_case__ = output_path.read_text(encoding='''utf-8''' ) snake_case__ = 
text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Optional[Any]: snake_case__ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } snake_case__ = input_paths[compression_format] if input_path is None: snake_case__ = F"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__lowerCAmelCase ) snake_case__ = Extractor.infer_extractor_format(__lowerCAmelCase ) assert extractor_format is not None snake_case__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name snake_case__ = file_path.read_text(encoding='''utf-8''' ) else: snake_case__ = output_path.read_text(encoding='''utf-8''' ) snake_case__ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: import tarfile snake_case__ = tmp_path / '''data_dot_dot''' directory.mkdir() snake_case__ = 
directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(__lowerCAmelCase , '''w''' ) as f: f.add(__lowerCAmelCase , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]: import tarfile snake_case__ = tmp_path / '''data_sym_link''' directory.mkdir() snake_case__ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=__lowerCAmelCase ) with tarfile.TarFile(__lowerCAmelCase , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: snake_case__ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } snake_case__ = insecure_tar_files[insecure_tar_file] snake_case__ = tmp_path / '''extracted''' TarExtractor.extract(__lowerCAmelCase , __lowerCAmelCase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any: # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number snake_case__ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 snake_case__ = ( b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(__lowerCAmelCase 
) assert zipfile.is_zipfile(str(__lowerCAmelCase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__lowerCAmelCase ) # but we're right
33
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): 
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) 
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a 
test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): 
lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
__all__ = ["_UpperCamelCase"]


def _UpperCamelCase(mass: float, velocity: float) -> float:
    """Return the kinetic energy ``0.5 * m * v**2`` of a moving body.

    The original signature declared *both* parameters as ``a__`` (a duplicate
    argument name, which is a SyntaxError) while the body read an unbound
    ``mass``; the two arguments are really the body's mass and its velocity.

    :param mass: mass of the body (kilograms); must be non-negative.
    :param velocity: velocity of the body (metres per second); sign is ignored.
    :return: kinetic energy in joules.
    :raises ValueError: if ``mass`` is negative.
    """
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""")
    # abs() applied twice is velocity squared with the sign normalised away.
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
619
__all__ = ["__lowerCAmelCase", "_bytes_to_base16"]


def _bytes_to_base16(UpperCamelCase: bytes) -> str:
    """Encode *UpperCamelCase* into an uppercase base16 (hex) string.

    NOTE(review): originally this encoder shared the name ``__lowerCAmelCase``
    with the decoder below, so it was immediately shadowed and unreachable; it
    also called ``hex()`` on the whole bytes object instead of on each byte.
    Both defects are fixed here; the decoder keeps the original public name.
    """
    # hex(byte)[2:] drops the "0x" prefix; zfill pads single-digit bytes.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(UpperCamelCase)])


def __lowerCAmelCase(UpperCamelCase: str) -> bytes:
    """Decode an RFC 3548 base16 (uppercase hex) string into bytes.

    :raises ValueError: if the length is odd or any character is outside the
        uppercase base16 alphabet ``0-9A-F``.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(UpperCamelCase) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.'''
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(UpperCamelCase) <= set('''0123456789ABCDEF'''):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.'''
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    # (The original read an undefined name `data` here; the parameter is used.)
    return bytes(
        int(UpperCamelCase[i] + UpperCamelCase[i + 1], 16)
        for i in range(0, len(UpperCamelCase), 2)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
678
0
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

# Minimum flax version required by these tests.
# NOTE(review): the annotation `Tuple` is never imported in this module, and
# several identifiers below were renamed inconsistently by an automated
# rewrite (e.g. `__UpperCAmelCase` is read but never bound, and the function
# `A__` declares two parameters with the same name) — the code is reproduced
# unchanged; only comments/docstrings were added.
lowercase_ : Tuple = '0.12'  # assumed parallelism: 8


@require_flax
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
    """Staging-hub round-trip tests for FlaxBertModel.

    Pushes a tiny randomly-initialised model to the hub — both into a user
    namespace and an organisation namespace — reloads it with
    `from_pretrained`, and checks every parameter survives the round trip.
    """

    @classmethod
    def UpperCamelCase_ ( cls ) -> Optional[Any]:
        # setUpClass: persist the test token so the push_to_hub calls below
        # authenticate as the staging test user.
        SCREAMING_SNAKE_CASE__: Dict= TOKEN
        HfFolder.save_token(__UpperCAmelCase )

    @classmethod
    def UpperCamelCase_ ( cls ) -> Union[str, Any]:
        # tearDownClass: best-effort removal of the repos created by the tests.
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Round trip via `push_to_hub`, then again via
        # `save_pretrained(..., push_to_hub=...)`, into the user namespace.
        SCREAMING_SNAKE_CASE__: List[Any]= BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        SCREAMING_SNAKE_CASE__: List[Any]= FlaxBertModel(__UpperCAmelCase )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )

        SCREAMING_SNAKE_CASE__: List[Any]= FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )

        SCREAMING_SNAKE_CASE__: Union[str, Any]= flatten_dict(unfreeze(model.params ) )
        SCREAMING_SNAKE_CASE__: List[Any]= flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            # Parameters must be numerically identical after the round trip.
            SCREAMING_SNAKE_CASE__: str= (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f'{key} not identical' )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )

        SCREAMING_SNAKE_CASE__: Tuple= FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )

        SCREAMING_SNAKE_CASE__: List[str]= flatten_dict(unfreeze(model.params ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            SCREAMING_SNAKE_CASE__: Union[str, Any]= (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f'{key} not identical' )

    def UpperCamelCase_ ( self ) -> str:
        # Same round trip as above, but into an organisation namespace.
        SCREAMING_SNAKE_CASE__: List[str]= BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        SCREAMING_SNAKE_CASE__: Optional[Any]= FlaxBertModel(__UpperCAmelCase )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )

        SCREAMING_SNAKE_CASE__: str= FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        SCREAMING_SNAKE_CASE__: List[str]= flatten_dict(unfreeze(model.params ) )
        SCREAMING_SNAKE_CASE__: Dict= flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            SCREAMING_SNAKE_CASE__: Tuple= (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f'{key} not identical' )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )

        SCREAMING_SNAKE_CASE__: Union[str, Any]= FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        SCREAMING_SNAKE_CASE__: Optional[int]= flatten_dict(unfreeze(model.params ) )
        SCREAMING_SNAKE_CASE__: List[str]= flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            SCREAMING_SNAKE_CASE__: Tuple= (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f'{key} not identical' )


def A__ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
    # Compare two flax models parameter-by-parameter; returns True when every
    # parameter tensor differs by at most 1e-4 in absolute sum.
    # NOTE(review): the automated rename gave both parameters the same name
    # (`snake_case_`) — a SyntaxError — and the body reads `modela`,
    # `flat_params_a` and `models_are_equal`, which are never bound here.
    SCREAMING_SNAKE_CASE__: Any= True
    SCREAMING_SNAKE_CASE__: Any= flatten_dict(modela.params )
    SCREAMING_SNAKE_CASE__: List[str]= flatten_dict(modela.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
            SCREAMING_SNAKE_CASE__: Optional[Any]= False

    return models_are_equal


@require_flax
class _lowerCamelCase ( unittest.TestCase ):
    """Subfolder loading tests.

    Saves a tiny FlaxBertModel under a subdirectory (optionally sharded) and
    checks that `from_pretrained` only finds it when `subfolder=` is given.
    """

    def UpperCamelCase_ ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE__: List[str]= BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        SCREAMING_SNAKE_CASE__: List[str]= FlaxBertModel(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )

            # Loading from the repo root must fail; only the subfolder works.
            with self.assertRaises(__UpperCAmelCase ):
                SCREAMING_SNAKE_CASE__: Optional[int]= FlaxBertModel.from_pretrained(__UpperCAmelCase )

            SCREAMING_SNAKE_CASE__: List[str]= FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )

        self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )

    def UpperCamelCase_ ( self ) -> int:
        # Same as above but with a sharded checkpoint (tiny max_shard_size).
        SCREAMING_SNAKE_CASE__: Tuple= BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= FlaxBertModel(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' )

            with self.assertRaises(__UpperCAmelCase ):
                SCREAMING_SNAKE_CASE__: Tuple= FlaxBertModel.from_pretrained(__UpperCAmelCase )

            SCREAMING_SNAKE_CASE__: Any= FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )

        self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) )

    def UpperCamelCase_ ( self ) -> Any:
        # Remote repo whose weights live only inside a "bert" subfolder.
        SCREAMING_SNAKE_CASE__: List[str]= '''bert'''
        SCREAMING_SNAKE_CASE__: int= '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(__UpperCAmelCase ):
            SCREAMING_SNAKE_CASE__: Dict= FlaxBertModel.from_pretrained(__UpperCAmelCase )

        SCREAMING_SNAKE_CASE__: Optional[int]= FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )

        self.assertIsNotNone(__UpperCAmelCase )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Remote sharded checkpoint inside a subfolder.
        SCREAMING_SNAKE_CASE__: List[Any]= '''bert'''
        SCREAMING_SNAKE_CASE__: Tuple= '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(__UpperCAmelCase ):
            SCREAMING_SNAKE_CASE__: Union[str, Any]= FlaxBertModel.from_pretrained(__UpperCAmelCase )

        SCREAMING_SNAKE_CASE__: Optional[Any]= FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase )

        self.assertIsNotNone(__UpperCAmelCase )
64
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class _lowerCAmelCase ( _lowercase ):
    """Scheduler tests for DPMSolverSDEScheduler (requires torchsde).

    NOTE(review): an automated rewrite mangled identifiers in this file — the
    base class is written `_lowercase` (the import above suggests
    `SchedulerCommonTest`), both class attributes are named `A__` (so the
    second assignment shadows the first), every test method is
    `__magic_name__` (each definition shadows the previous one), and many
    locals read below (`config`, `scheduler_class`, `scheduler`, `model`,
    `sample`, `output`, `result_sum`, `result_mean`) had their assignments
    renamed to `lowerCAmelCase__`. Code reproduced unchanged; comments only.
    """

    A__ = (DPMSolverSDEScheduler,)
    A__ = 10

    def __magic_name__( self , **__UpperCAmelCase ):
        # Default scheduler config; keyword arguments override entries.
        lowerCAmelCase__ : Dict = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }

        config.update(**__UpperCAmelCase )
        return config

    def __magic_name__( self ):
        # Sweep the number of training timesteps.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase )

    def __magic_name__( self ):
        # Sweep matched (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )

    def __magic_name__( self ):
        # Sweep beta schedules.
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__UpperCAmelCase )

    def __magic_name__( self ):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase )

    def __magic_name__( self ):
        # Full denoising loop with the default (epsilon) config; the expected
        # sums/means are golden values recorded per device backend.
        lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : str = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )

        scheduler.set_timesteps(self.num_inference_steps )

        lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
        lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = output.prev_sample

        lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        # Full loop with prediction_type="v_prediction".
        lowerCAmelCase__ : Dict = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )

        scheduler.set_timesteps(self.num_inference_steps )

        lowerCAmelCase__ : Optional[Any] = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = output.prev_sample

        lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3

    def __magic_name__( self ):
        # Full loop with timesteps placed directly on the target device.
        lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config()
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )

        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )

        lowerCAmelCase__ : Dict = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = output.prev_sample

        lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        # Full loop with Karras sigmas enabled.
        lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Dict = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )

        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )

        lowerCAmelCase__ : List[Any] = self.dummy_model()
        lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
        lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )

        for t in scheduler.timesteps:
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )

            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Dict = output.prev_sample

        lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
678
0
"""simple docstring""" def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return "".join([hex(_UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(_UpperCamelCase )] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if (len(_UpperCamelCase ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_UpperCamelCase ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
636
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Unit tests for the generation stopping criteria.

    NOTE(review): an automated rewrite renamed every test method to
    `__magic_name__` (each definition shadows the previous one at class
    creation) and renamed assignment targets to `lowerCAmelCase__` while the
    reads still use the original names (`batch_size`, `length`, `criteria`,
    `criteria_list`, ...); the tuple-annotated assignments
    (`lowerCAmelCase__ , lowerCAmelCase__ : ... = ...`) are not valid Python.
    Code reproduced unchanged; comments only.
    """

    def __magic_name__( self , __UpperCAmelCase ):
        # Helper: build (input_ids, scores) with the requested sequence length.
        lowerCAmelCase__ : Union[str, Any] = 3
        lowerCAmelCase__ : Tuple = 250
        lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase )
        lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length
        return input_ids, scores

    def __magic_name__( self ):
        # A StoppingCriteriaList triggers as soon as any one criterion is met.
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )

        lowerCAmelCase__ : List[str] = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )

        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
        self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

    def __magic_name__( self ):
        # MaxLengthCriteria fires exactly when the sequence reaches max_length.
        lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )

        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
        self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

    def __magic_name__( self ):
        # MaxNewTokensCriteria counts tokens generated past start_length.
        lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )

        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
        self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )

    def __magic_name__( self ):
        # MaxTimeCriteria compares elapsed wall-clock time against max_time;
        # backdating initial_timestamp forces the criterion to fire.
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )

        lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

        lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )

    def __magic_name__( self ):
        # validate_stopping_criteria warns on a mismatched max_length and
        # returns a list with a criterion appended when none is present.
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )

        with self.assertWarns(__UpperCAmelCase ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )

        lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )

        self.assertEqual(len(__UpperCAmelCase ) , 1 )
678
0
"""simple docstring""" import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever _lowercase : Optional[Any] = logging.getLogger(__name__) class _UpperCAmelCase ( _lowercase ): def __init__( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str=None ): super().__init__( __UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , ) __UpperCAmelCase = None def a ( self : Union[str, Any] , _lowercase : Union[str, Any] ): logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually __UpperCAmelCase = self._infer_socket_ifname() # avoid clash with the NCCL port __UpperCAmelCase = str(distributed_port + 1 ) __UpperCAmelCase = dist.new_group(ranks=__UpperCAmelCase , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def a ( self : int ): return dist.get_rank(group=self.process_group ) == 0 def a ( self : str , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa ): __UpperCAmelCase = torch.empty(__UpperCAmelCase , dtype=__UpperCAmelCase ) dist.scatter(__UpperCAmelCase , src=0 , scatter_list=__UpperCAmelCase , group=self.process_group ) return target_tensor def a ( self : Optional[int] ): __UpperCAmelCase = 
psutil.net_if_addrs() # a hacky way to deal with varying network interface names __UpperCAmelCase = next((addr for addr in addrs if addr.startswith('''e''' )) , __UpperCAmelCase ) return ifname def a ( self : Dict , _lowercase : List[Any] , _lowercase : int ): # single GPU training if not dist.is_initialized(): __UpperCAmelCase = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase ) # distributed training __UpperCAmelCase = dist.get_world_size(group=self.process_group ) # gather logic __UpperCAmelCase = None if self._is_main(): __UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__UpperCAmelCase )] dist.gather(torch.tensor(__UpperCAmelCase ) , dst=0 , gather_list=__UpperCAmelCase , group=self.process_group ) # scatter logic __UpperCAmelCase = question_hidden_states.shape[0] __UpperCAmelCase = [] __UpperCAmelCase = [] if self._is_main(): assert len(__UpperCAmelCase ) == world_size __UpperCAmelCase = self._main_retrieve(torch.cat(__UpperCAmelCase ).numpy() , __UpperCAmelCase ) __UpperCAmelCase = torch.tensor(__UpperCAmelCase ), torch.tensor(__UpperCAmelCase ) __UpperCAmelCase = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase = self._scattered(__UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa ) __UpperCAmelCase = self._scattered(__UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCAmelCase )
49
from functools import reduce  # kept: imported by the original module (no longer used below)

__all__ = ["__lowerCAmelCase"]

# Project Euler problem 8: the 1000-digit number, 50 digits per line.
lowerCAmelCase_ = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def __lowerCAmelCase(UpperCamelCase: str = lowerCAmelCase_) -> int:
    """Return the greatest product of 13 adjacent digits in *UpperCamelCase*.

    Fixes two defects in the original: the default argument referenced an
    undefined name ``N`` (the constant is ``lowerCAmelCase_``), and the
    string-based ``reduce`` round trip is replaced by direct integer
    multiplication. Returns 0 when the input has fewer than 13 digits.

    :param UpperCamelCase: a string of decimal digits (defaults to the
        Project Euler problem-8 number).
    :return: the maximum product over all windows of 13 consecutive digits.
    """
    best = 0
    for start in range(len(UpperCamelCase) - 12):
        product = 1
        for digit in UpperCamelCase[start : start + 13]:
            product *= int(digit)
        best = max(best, product)
    return best


if __name__ == "__main__":
    # The original printed `solution()`, a name that does not exist here.
    print(f"""{__lowerCAmelCase() = }""")
678
0
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed _a : List[str] = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Dict ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ): if args.student_type == "roberta": UpperCAmelCase = False elif args.student_type == 
"gpt2": UpperCAmelCase = False def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] ): if args.student_type == "roberta": UpperCAmelCase = False def lowerCamelCase__ ( ): UpperCAmelCase = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=SCREAMING_SNAKE_CASE , choices=['distilbert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=SCREAMING_SNAKE_CASE , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' 
, ) parser.add_argument('--alpha_clm' , default=0.5 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=SCREAMING_SNAKE_CASE , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=SCREAMING_SNAKE_CASE , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=SCREAMING_SNAKE_CASE , default=3 , help='Number of pass on the whole dataset.' 
) parser.add_argument('--batch_size' , type=SCREAMING_SNAKE_CASE , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=SCREAMING_SNAKE_CASE , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=SCREAMING_SNAKE_CASE , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=SCREAMING_SNAKE_CASE , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=SCREAMING_SNAKE_CASE , help='Random initialization range.' ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=SCREAMING_SNAKE_CASE , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=SCREAMING_SNAKE_CASE , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=SCREAMING_SNAKE_CASE , default=500 , help='Tensorboard logging interval.' 
) parser.add_argument('--checkpoint_interval' , type=SCREAMING_SNAKE_CASE , default=4000 , help='Checkpoint interval.' ) UpperCAmelCase = parser.parse_args() sanity_checks(SCREAMING_SNAKE_CASE ) # ARGS # init_gpu_params(SCREAMING_SNAKE_CASE ) set_seed(SCREAMING_SNAKE_CASE ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , indent=4 ) git_log(args.dump_path ) UpperCAmelCase = MODEL_CLASSES[args.student_type] UpperCAmelCase = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE ) UpperCAmelCase = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) UpperCAmelCase = special_tok_ids UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: UpperCAmelCase = pickle.load(SCREAMING_SNAKE_CASE ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: UpperCAmelCase = pickle.load(SCREAMING_SNAKE_CASE ) UpperCAmelCase = np.maximum(SCREAMING_SNAKE_CASE , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase = 0.0 # do not predict 
special tokens UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE ) else: UpperCAmelCase = None UpperCAmelCase = LmSeqsDataset(params=SCREAMING_SNAKE_CASE , data=SCREAMING_SNAKE_CASE ) logger.info('Data loader created.' ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) UpperCAmelCase = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE ) else: UpperCAmelCase = student_model_class(SCREAMING_SNAKE_CASE ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' ) # TEACHER # UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if args.freeze_token_type_embds: freeze_token_type_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase = Distiller( params=SCREAMING_SNAKE_CASE , dataset=SCREAMING_SNAKE_CASE , token_probs=SCREAMING_SNAKE_CASE , student=SCREAMING_SNAKE_CASE , teacher=SCREAMING_SNAKE_CASE ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
447
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite PyTorch sub-module list names (``layers.0``) to Flax style (``layers_0``).

    Only ``<word>.<digits>`` segments are rewritten; everything else in the
    dotted key is left untouched.
    """
    # NOTE: original code was obfuscated so that this function, the one below and
    # the converter were all named `__lowerCAmelCase`, shadowing each other while
    # the converter still called `rename_key` / `rename_key_and_reshape_tensor`
    # — a guaranteed NameError. Restoring the real names fixes that.
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key (as a tuple) to the Flax convention and
    reshape the tensor accordingly.

    Args:
        pt_tuple_key: the dotted PyTorch key, already split into a tuple.
        pt_tensor: the (numpy) tensor for that key.
        random_flax_state_dict: flattened randomly-initialized Flax params, used
            to decide which Flax name actually exists for this parameter.

    Returns:
        ``(renamed_key_tuple, possibly_transposed_tensor)``. Falls through to the
        unchanged key/tensor when no rule matches.
    """
    # layer norm: a norm "bias" maps to Flax "scale" when only "scale" exists
    # in the random Flax params (i.e. the Flax module has no separate bias).
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding: PyTorch "weight" becomes Flax "embedding"
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (O, I, H, W) -> Flax (H, W, I, O)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight naming ("gamma" -> "weight")
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias naming ("beta" -> "bias")
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax params dict.

    Args:
        pt_state_dict: mapping of dotted PyTorch keys to torch tensors.
        flax_model: a Flax model exposing ``init_weights(rng)`` used to obtain
            the reference parameter structure (the model is stateless).
        init_key: integer seed for the PRNG used by ``init_weights``.

    Returns:
        The converted parameters as a nested (unflattened) dict.

    Raises:
        ValueError: if a converted tensor's shape disagrees with the shape of
            the matching randomly-initialized Flax parameter.
    """
    # Step 1: torch tensors -> numpy arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: since the model is stateless, get random Flax params as reference
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Rename parameters to match Flax naming and layout
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weights so that a warning can be thrown downstream
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0