code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Tuple: UpperCamelCase : Any = inspect.getfile(accelerate.test_utils ) UpperCamelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) UpperCamelCase : List[str] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def snake_case_ ( self ) -> Dict: UpperCamelCase : List[str] = F""" {self.test_dir}/xla_spawn.py --num_cores 8 {self.test_file_path} """.split() UpperCamelCase : Any = [sys.executable] + distributed_args execute_subprocess_async(SCREAMING_SNAKE_CASE_, env=os.environ.copy() )
40
def _a ( lowerCamelCase ): if num < 0: return False lowerCamelCase : int = num lowerCamelCase : int = 0 while num > 0: lowerCamelCase : str = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
681
0
'''simple docstring''' import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def _A ( A__ , A__ , A__ , A__=5 ): """simple docstring""" assert masked_input.count('''<mask>''' ) == 1 __lowercase = torch.tensor(tokenizer.encode(A__ , add_special_tokens=A__ ) ).unsqueeze(0 ) # Batch size 1 __lowercase = model(A__ )[0] # The last hidden-state is the first element of the output tuple __lowercase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __lowercase = logits[0, masked_index, :] __lowercase = logits.softmax(dim=0 ) __lowercase , __lowercase = prob.topk(k=A__ , dim=0 ) __lowercase = ''' '''.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(A__ ) )] ) __lowercase = tokenizer.mask_token __lowercase = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ): __lowercase = predicted_token_bpe.replace('''\u2581''' , ''' ''' ) if " {0}".format(A__ ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(''' {0}'''.format(A__ ) , A__ ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(A__ , A__ ), values[index].item(), predicted_token, ) ) return topk_filled_outputs lowerCAmelCase__ = CamembertTokenizer.from_pretrained('''camembert-base''') lowerCAmelCase__ = CamembertForMaskedLM.from_pretrained('''camembert-base''') model.eval() lowerCAmelCase__ = '''Le camembert est <mask> :)''' print(fill_mask(masked_input, model, tokenizer, topk=3))
41
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
681
0
'''simple docstring''' import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: # Construct model if openai_config_file == "": lowerCamelCase_ = OpenAIGPTConfig() else: lowerCamelCase_ = OpenAIGPTConfig.from_json_file(__UpperCamelCase ) lowerCamelCase_ = OpenAIGPTModel(__UpperCamelCase ) # Load weights from numpy load_tf_weights_in_openai_gpt(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # Save pytorch-model lowerCamelCase_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME lowerCamelCase_ = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() ,__UpperCamelCase ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--openai_checkpoint_folder_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--openai_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture." ), ) A_ = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
42
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: List[Any] ) -> Dict: """simple docstring""" lowercase__ = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowercase__ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowercase__ = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above lowercase__ = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowercase__ = output[output != -float('''inf''' )] lowercase__ = tf.cast( tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-1_2 ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @require_tf class _a ( unittest.TestCase , UpperCamelCase__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): _lowercase : List[Any] = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 2 lowercase__ = 2 class _a ( tf.Module ): def __init__( self: List[Any] , UpperCamelCase_: List[str] ) -> Optional[int]: """simple docstring""" super(UpperCamelCase_ , self ).__init__() lowercase__ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} lowercase__ = [[2, 0], [102, 103]] lowercase__ = [[1, 0], [1, 1]] lowercase__ = DummyModel(model=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} ) lowercase__ = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ): lowercase__ = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } lowercase__ = serving_func(**UpperCamelCase_ )['''sequences'''] lowercase__ = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 1 lowercase__ = 2 class _a ( tf.Module ): def __init__( self: Tuple , UpperCamelCase_: Dict ) -> Tuple: """simple docstring""" super(UpperCamelCase_ , self ).__init__() lowercase__ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: List[str] ) -> Any: """simple docstring""" lowercase__ = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} lowercase__ = [[2], [102, 103]] lowercase__ = [[1], [1, 1]] lowercase__ = DummyModel(model=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'''serving_default''': dummy_model.serving} ) lowercase__ = tf.saved_model.load(UpperCamelCase_ ).signatures['''serving_default'''] for input_row in range(len(UpperCamelCase_ ) ): lowercase__ = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } lowercase__ = serving_func(**UpperCamelCase_ )['''sequences'''] lowercase__ = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @slow @require_tensorflow_text def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase_ ) class _a ( tf.keras.layers.Layer ): def __init__( self: str ) -> Tuple: """simple docstring""" super().__init__() lowercase__ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , '''spiece.model''' ) , '''rb''' ).read() ) lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def lowerCamelCase_ ( self: int , UpperCamelCase_: int , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict ) -> Dict: """simple docstring""" lowercase__ = self.tokenizer.tokenize(UpperCamelCase_ ) lowercase__ , lowercase__ = text.pad_model_inputs( UpperCamelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowercase__ = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) return self.tokenizer.detokenize(UpperCamelCase_ ) lowercase__ = CompleteSentenceTransformer() lowercase__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) lowercase__ = complete_model(UpperCamelCase_ ) lowercase__ = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ ) keras_model.save(UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } lowercase__ = 14 lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = '''Hello, my dog is cute and''' lowercase__ = tokenizer(UpperCamelCase_ , return_tensors='''tf''' ) lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) lowercase__ = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowercase__ = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) lowercase__ = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = '''Hugging Face is a technology company based in New York and Paris.''' lowercase__ = bart_tokenizer(UpperCamelCase_ , return_tensors='''tf''' ).input_ids lowercase__ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = bart_model.generate(UpperCamelCase_ ).numpy() class _a ( UpperCamelCase__ ): def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Optional[int] ) -> List[Any]: """simple docstring""" return super().call(UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = bart_model.generate(UpperCamelCase_ , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) ) class _a ( bart_model.model.encoder.__class__ ): def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Tuple , **UpperCamelCase_: int ) -> List[Any]: """simple docstring""" return super().call(UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = FakeEncoder(bart_model.config , bart_model.model.shared ) lowercase__ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowercase__ = bart_model.generate(UpperCamelCase_ ).numpy() with self.assertRaises(UpperCamelCase_ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase_ , foo='''bar''' )
43
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Optional[int] = LxmertConfig.from_json_file(_lowerCAmelCase ) print(F'Building PyTorch model from configuration: {config}' ) _lowerCamelCase : str = LxmertForPreTraining(_lowerCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , _lowerCAmelCase ) if __name__ == "__main__": UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
44
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
681
0
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def A ( lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str: if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release: # old versions of hfh don't url-encode the file path UpperCamelCase__ :str = quote(lowercase__ ) return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type="""dataset""" , revision=lowercase__ )
45
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): # Initialise PyTorch model lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
681
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : str = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' for attribute in key.split("." ): _lowerCamelCase : str = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: _lowerCamelCase : List[str] = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: _lowerCamelCase : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowerCamelCase : List[Any] = value elif weight_type == "weight_g": _lowerCamelCase : str = value elif weight_type == "weight_v": _lowerCamelCase : Any = value elif weight_type == "bias": _lowerCamelCase : Union[str, Any] = value elif weight_type == "running_mean": _lowerCamelCase : Union[str, Any] = value elif weight_type == "running_var": _lowerCamelCase : Any = value elif weight_type == "num_batches_tracked": _lowerCamelCase : Optional[int] = value elif weight_type == "inv_freq": _lowerCamelCase : List[Any] = value else: _lowerCamelCase : int = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = [] _lowerCamelCase : str = fairseq_model.state_dict() _lowerCamelCase : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _lowerCamelCase : str = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , ) _lowerCamelCase : Dict = True else: for key, mapped_key in MAPPING.items(): _lowerCamelCase : Optional[Any] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _lowerCamelCase : List[str] = True if "*" in mapped_key: _lowerCamelCase : int = name.split(_lowerCamelCase )[0].split("." )[-2] _lowerCamelCase : Tuple = mapped_key.replace("*" , _lowerCamelCase ) if "pos_bias_u" in name: _lowerCamelCase : List[str] = None elif "pos_bias_v" in name: _lowerCamelCase : Optional[int] = None elif "weight_g" in name: _lowerCamelCase : Optional[Any] = "weight_g" elif "weight_v" in name: _lowerCamelCase : Any = "weight_v" elif "bias" in name: _lowerCamelCase : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowerCamelCase : int = "weight" elif "running_mean" in name: _lowerCamelCase : str = "running_mean" elif "inv_freq" in name: _lowerCamelCase : List[str] = "inv_freq" elif "running_var" in name: _lowerCamelCase : str = "running_var" elif "num_batches_tracked" in name: _lowerCamelCase : List[Any] = "num_batches_tracked" else: _lowerCamelCase : Dict = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' _lowerCamelCase : List[Any] = full_name.split("conv_layers." )[-1] _lowerCamelCase : List[Any] = name.split("." ) _lowerCamelCase : Union[str, Any] = int(items[0] ) _lowerCamelCase : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowerCamelCase : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowerCamelCase : Tuple = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _lowerCamelCase : Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowerCamelCase : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> List[Any]: '''simple docstring''' if config_path is not None: _lowerCamelCase : Optional[Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" ) else: _lowerCamelCase : Optional[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: _lowerCamelCase : Optional[int] = "rotary" if is_finetuned: if dict_path: _lowerCamelCase : List[Any] = Dictionary.load(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowerCamelCase : List[str] = target_dict.pad_index _lowerCamelCase : List[Any] = target_dict.bos_index _lowerCamelCase : Dict = target_dict.eos_index _lowerCamelCase : List[str] = len(target_dict.symbols ) _lowerCamelCase : Optional[int] = os.path.join(_lowerCamelCase , "vocab.json" ) if not os.path.isdir(_lowerCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) _lowerCamelCase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = 1 with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Any = WavaVecaCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , ) _lowerCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False _lowerCamelCase : Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) _lowerCamelCase : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) _lowerCamelCase : Tuple = WavaVecaConformerForCTC(_lowerCamelCase ) else: _lowerCamelCase : Tuple = WavaVecaConformerForPreTraining(_lowerCamelCase ) if is_finetuned: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" ) _lowerCamelCase : Optional[int] = fairseq.tasks.setup_task(_lowerCamelCase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
46
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
681
0
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ): # Check if the input is valid if not len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == 3: raise ValueError('Please enter a valid equation.' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.' ) # Extract the coefficients __a , __a , __a : Optional[Any] = equationa __a , __a , __a : Optional[int] = equationa # Calculate the determinants of the matrices __a : str = aa * ba - aa * ba __a : Tuple = ca * ba - ca * ba __a : Union[str, Any] = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)' ) else: raise ValueError('No solution. (Inconsistent system)' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: __a : Any = determinant_x / determinant __a : Optional[Any] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
47
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
681
0
'''simple docstring''' from manim import * class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ = [mem.copy() for i in range(6 )] lowerCAmelCase__ = [mem.copy() for i in range(6 )] lowerCAmelCase__ = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = VGroup(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = Text("CPU" , font_size=24 ) lowerCAmelCase__ = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__magic_name__ ) lowerCAmelCase__ = [mem.copy() for i in range(4 )] lowerCAmelCase__ = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = Text("GPU" , font_size=24 ) lowerCAmelCase__ = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) gpu.move_to([-1, -1, 0] ) self.add(__magic_name__ ) lowerCAmelCase__ = [mem.copy() for i in range(6 )] lowerCAmelCase__ = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = Text("Model" , font_size=24 ) lowerCAmelCase__ = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) model.move_to([3, -1.0, 0] ) self.add(__magic_name__ ) lowerCAmelCase__ = [] for i, rect in enumerate(__magic_name__ ): rect.set_stroke(__magic_name__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) lowerCAmelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__magic_name__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__magic_name__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__magic_name__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__magic_name__ , buff=0.0 ) self.add(__magic_name__ ) cpu_targs.append(__magic_name__ ) lowerCAmelCase__ = [mem.copy() for i in range(6 )] lowerCAmelCase__ = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) lowerCAmelCase__ = Text("Loaded Checkpoint" , font_size=24 ) lowerCAmelCase__ = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , aligned_edge=__magic_name__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) lowerCAmelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__magic_name__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) lowerCAmelCase__ = MarkupText( f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__magic_name__ ) , Write(__magic_name__ ) ) self.play(Write(__magic_name__ , run_time=1 ) , Create(__magic_name__ , run_time=1 ) ) lowerCAmelCase__ = [] lowerCAmelCase__ = [] for i, rect in enumerate(__magic_name__ ): lowerCAmelCase__ = fill.copy().set_fill(__magic_name__ , opacity=0.7 ) target.move_to(__magic_name__ ) first_animations.append(GrowFromCenter(__magic_name__ , run_time=1 ) ) lowerCAmelCase__ = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__magic_name__ , run_time=1.5 ) ) self.play(*__magic_name__ ) self.play(*__magic_name__ ) self.wait()
48
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[str] = k_size // 2 lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) ) return g def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase : Dict = height - k_size + 1 lowerCamelCase : str = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase : List[Any] = 0 for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ): lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase ) lowerCamelCase : str = ravel(lowerCamelCase ) # reshape and get the dst image lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase ) return dst if __name__ == "__main__": # read original image _lowerCamelCase =imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _lowerCamelCase =gaussian_filter(gray, 3, sigma=1) _lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
681
0
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : List[str] = '▁' _lowercase : Optional[int] = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } _lowercase : str = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } _lowercase : Optional[Any] = { 'facebook/s2t-small-librispeech-asr': 10_24, } _lowercase : Optional[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] _lowercase : Union[str, Any] = {'mustc': MUSTC_LANGS} class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Optional[Any] = VOCAB_FILES_NAMES a__ : str = PRETRAINED_VOCAB_FILES_MAP a__ : Any = MAX_MODEL_INPUT_SIZES a__ : int = ["input_ids", "attention_mask"] a__ : List[int] = [] def __init__( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Optional[Any]="<s>" , _lowercase : str="</s>" , _lowercase : List[Any]="<pad>" , _lowercase : Tuple="<unk>" , _lowercase : Union[str, Any]=False , _lowercase : Union[str, Any]=False , _lowercase : List[Any]=None , _lowercase : int=None , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : List[Any] , ): __UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , do_upper_case=_lowercase , do_lower_case=_lowercase , tgt_lang=_lowercase , lang_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , ) __UpperCAmelCase = do_upper_case __UpperCAmelCase = do_lower_case __UpperCAmelCase = load_json(_lowercase ) __UpperCAmelCase = {v: k for k, v in self.encoder.items()} __UpperCAmelCase = spm_file __UpperCAmelCase = load_spm(_lowercase , self.sp_model_kwargs ) if lang_codes is not None: __UpperCAmelCase = lang_codes __UpperCAmelCase = LANGUAGES[lang_codes] __UpperCAmelCase = [F'''<lang:{lang}>''' for lang in self.langs] __UpperCAmelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs} __UpperCAmelCase = self.lang_tokens __UpperCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __UpperCAmelCase = {} @property def a ( self : List[str] ): return len(self.encoder ) @property def a ( self : str ): return self._tgt_lang @tgt_lang.setter def a ( self : Optional[int] , _lowercase : Optional[int] ): __UpperCAmelCase = new_tgt_lang self.set_tgt_lang_special_tokens(_lowercase ) def a ( self : Any , _lowercase : str ): __UpperCAmelCase = self.lang_code_to_id[tgt_lang] __UpperCAmelCase = [lang_code_id] def a ( self : str , _lowercase : str ): return self.sp_model.encode(_lowercase , out_type=_lowercase ) def a ( self : List[Any] , _lowercase : Tuple ): return self.encoder.get(_lowercase , self.encoder[self.unk_token] ) def a ( self : List[Any] , _lowercase : int ): return self.decoder.get(_lowercase , self.unk_token ) def a ( self : Dict , _lowercase : List[str] ): __UpperCAmelCase = [] __UpperCAmelCase = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __UpperCAmelCase = self.sp_model.decode(_lowercase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " __UpperCAmelCase = [] else: current_sub_tokens.append(_lowercase ) __UpperCAmelCase = self.sp_model.decode(_lowercase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Any=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase ) __UpperCAmelCase = [1] * len(self.prefix_tokens ) __UpperCAmelCase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowercase )) + suffix_ones return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones def a ( self : Any ): __UpperCAmelCase = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Any , _lowercase : Dict ): __UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __UpperCAmelCase = {} __UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs ) def a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ): __UpperCAmelCase = Path(_lowercase ) assert save_dir.is_dir(), F'''{save_directory} should be a directory''' __UpperCAmelCase = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) __UpperCAmelCase = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowercase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowercase ) elif not os.path.isfile(self.spm_file ): with open(_lowercase , '''wb''' ) as fi: __UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(_lowercase ) return (str(_lowercase ), str(_lowercase )) def lowercase__ ( snake_case_ :str , snake_case_ :Dict[str, Any] ): __UpperCAmelCase = sentencepiece.SentencePieceProcessor(**snake_case_ ) spm.Load(str(snake_case_ ) ) return spm def lowercase__ ( snake_case_ :str ): with open(snake_case_ , '''r''' ) as f: return json.load(snake_case_ ) def lowercase__ ( snake_case_ :Dict , snake_case_ :str ): with open(snake_case_ , '''w''' ) as f: json.dump(snake_case_ , snake_case_ , indent=2 )
49
import pytest _lowerCamelCase ="""__dummy_dataset1__""" _lowerCamelCase =""" import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Union[str, Any] = dataset_loading_script_name lowerCamelCase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowerCamelCase : str = script_dir / F'''{script_name}.py''' with open(lowerCamelCase, """w""" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
681
0
'''simple docstring''' from statistics import mean import numpy as np def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ): lowerCamelCase__ = 0 # Number of processes finished lowerCamelCase__ = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. lowerCamelCase__ = [0] * no_of_process # List to include calculation results lowerCamelCase__ = [0] * no_of_process # Sort by arrival time. lowerCamelCase__ = [burst_time[i] for i in np.argsort(__lowerCAmelCase )] lowerCamelCase__ = [process_name[i] for i in np.argsort(__lowerCAmelCase )] arrival_time.sort() while no_of_process > finished_process_count: lowerCamelCase__ = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: lowerCamelCase__ = arrival_time[i] lowerCamelCase__ = 0 # Index showing the location of the process being performed lowerCamelCase__ = 0 # Saves the current response ratio. lowerCamelCase__ = 0 for i in range(0 , __lowerCAmelCase ): if finished_process[i] == 0 and arrival_time[i] <= current_time: lowerCamelCase__ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: lowerCamelCase__ = temp lowerCamelCase__ = i # Calculate the turn around time lowerCamelCase__ = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. lowerCamelCase__ = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ): lowerCamelCase__ = [0] * no_of_process for i in range(0 , __lowerCAmelCase ): lowerCamelCase__ = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": UpperCamelCase : Optional[int] = 5 UpperCamelCase : str = ['A', 'B', 'C', 'D', 'E'] UpperCamelCase : Optional[int] = [1, 2, 3, 4, 5] UpperCamelCase : Tuple = [1, 2, 3, 4, 5] UpperCamelCase : str = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) UpperCamelCase : Dict = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
50
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): _lowerCamelCase ={ """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: _lowerCamelCase ={ """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _a ( lowerCamelCase ): lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 ) lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase : Any = numpy_to_pil(lowerCamelCase ) return images def _a ( lowerCamelCase ): if images.ndim == 3: lowerCamelCase : Optional[Any] = images[None, ...] lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images] else: lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
681
0
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class lowerCAmelCase__ : '''simple docstring''' _lowerCamelCase =42 _lowerCamelCase =None _lowerCamelCase =None a__ : str = namedtuple('CoinsDistribResult', 'moves excess') def __snake_case ( SCREAMING_SNAKE_CASE_ : TreeNode | None ) -> int: """simple docstring""" if root is None: return 0 # Validation def count_nodes(SCREAMING_SNAKE_CASE_ : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(SCREAMING_SNAKE_CASE_ : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(SCREAMING_SNAKE_CASE_ ) != count_coins(SCREAMING_SNAKE_CASE_ ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(SCREAMING_SNAKE_CASE_ : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase, UpperCAmelCase = get_distrib(node.left ) UpperCAmelCase, UpperCAmelCase = get_distrib(node.right ) UpperCAmelCase = 1 - left_distrib_excess UpperCAmelCase = 1 - right_distrib_excess UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ ) ) UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return get_distrib(SCREAMING_SNAKE_CASE_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
51
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
681
0
"""simple docstring""" import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC A = parse(importlib.metadata.version('''torch''')) def __A ( a_ :Union[str, Version] , a_ :str , a_ :str) -> Dict: if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}""") __a : int = STR_OPERATION_TO_FUNC[operation] if isinstance(a_ , a_): __a : List[str] = parse(importlib.metadata.version(a_)) return operation(a_ , parse(a_)) def __A ( a_ :str , a_ :str) -> int: return compare_versions(a_ , a_ , a_)
52
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase : """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=1_3 , lowerCAmelCase_ : Any=3_0 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Tuple=3_7 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Any=1_0 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : int=0.6 , lowerCAmelCase_ : Tuple=None , ) -> List[Any]: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = is_training __lowerCAmelCase = use_labels __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = mask_ratio __lowerCAmelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) __lowerCAmelCase = (image_size // patch_size) ** 2 __lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowercase ( self : Any ) -> Optional[Any]: __lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = self.get_config() return config, pixel_values, labels def lowercase ( self : Any ) -> int: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowercase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: __lowerCAmelCase = ViTMAEModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() __lowerCAmelCase = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> List[str]: __lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() __lowerCAmelCase = model(lowerCAmelCase_ ) __lowerCAmelCase = (self.image_size // self.patch_size) ** 2 __lowerCAmelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images __lowerCAmelCase = 1 __lowerCAmelCase = ViTMAEForPreTraining(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() __lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCAmelCase = model(lowerCAmelCase_ ) __lowerCAmelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowercase ( self : Tuple ) -> List[Any]: __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" a_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () a_ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} a_ = False a_ = False a_ = False a_ = False def lowercase ( self : str ) -> Optional[Any]: __lowerCAmelCase = ViTMAEModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 ) def lowercase ( self : List[Any] ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def lowercase ( self : List[str] ) -> Dict: pass def lowercase ( self : List[str] ) -> List[str]: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def lowercase ( self : int ) -> int: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase_ ) __lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase = [*signature.parameters.keys()] __lowerCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowercase ( self : Union[str, Any] ) -> Any: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowercase ( self : List[Any] ) -> int: __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ ) def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> List[Any]: # make masks reproducible np.random.seed(2 ) __lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) __lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument __lowerCAmelCase = pt_noise super().check_pt_tf_models(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowercase ( self : Tuple ) -> Union[str, Any]: __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) __lowerCAmelCase = outputs[0].cpu().numpy() __lowerCAmelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase_ ) __lowerCAmelCase = model_class.from_pretrained(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): __lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) # Make sure we don't have nans __lowerCAmelCase = after_outputs[0].cpu().numpy() __lowerCAmelCase = 0 __lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-5 ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowercase ( self : Dict ) -> Optional[Any]: pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowercase ( self : str ) -> List[Any]: pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowercase ( self : int ) -> List[str]: pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def lowercase ( self : List[str] ) -> Optional[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase ( self : List[Any] ) -> Any: pass @slow def lowercase ( self : Dict ) -> List[Any]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = ViTMAEModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def a_ ( ): __lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase ( self : Tuple ) -> List[Any]: return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def lowercase ( self : Optional[int] ) -> Union[str, Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) __lowerCAmelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(lowerCAmelCase_ ) __lowerCAmelCase = self.default_image_processor __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) __lowerCAmelCase = ViTMAEConfig() __lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) __lowerCAmelCase = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): __lowerCAmelCase = model(**lowerCAmelCase_ , noise=torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ ) ) # verify the logits __lowerCAmelCase = torch.Size((1, 1_9_6, 7_6_8) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) __lowerCAmelCase = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCAmelCase_ ) , atol=1e-4 ) )
53
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCamelCase =HfArgumentParser(InitializationArguments) _lowerCamelCase =parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCamelCase ={ """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) _lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCamelCase =AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
681
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : List[Any] =logging.get_logger(__name__) __lowercase : Optional[Any] ={ """BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""", """BridgeTower/bridgetower-base-itm-mlm""": ( """https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json""" ), } class A ( __lowercase ): _snake_case ='''bridgetower_vision_model''' def __init__( self: Union[str, Any] , _lowerCAmelCase: Optional[int]=768 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: Tuple=3 , _lowerCAmelCase: Optional[int]=16 , _lowerCAmelCase: List[Any]=288 , _lowerCAmelCase: int=1 , _lowerCAmelCase: List[Any]=1e-05 , _lowerCAmelCase: int=False , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: List[str]=False , **_lowerCAmelCase: Tuple , ) -> Optional[int]: '''simple docstring''' super().__init__(**_lowerCAmelCase ) UpperCAmelCase_ =hidden_size UpperCAmelCase_ =num_hidden_layers UpperCAmelCase_ =num_channels UpperCAmelCase_ =patch_size UpperCAmelCase_ =image_size UpperCAmelCase_ =initializer_factor UpperCAmelCase_ =layer_norm_eps UpperCAmelCase_ =stop_gradient UpperCAmelCase_ =share_layernorm UpperCAmelCase_ =remove_last_layer @classmethod def lowerCAmelCase__ ( cls: str , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: Dict ) -> "PretrainedConfig": '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) if config_dict.get("model_type" ) == "bridgetower": UpperCAmelCase_ =config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class A ( __lowercase ): _snake_case ='''bridgetower_text_model''' def __init__( self: List[str] , _lowerCAmelCase: List[str]=5_0265 , _lowerCAmelCase: List[Any]=768 , _lowerCAmelCase: Optional[int]=12 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: List[Any]=1 , _lowerCAmelCase: List[str]=3072 , _lowerCAmelCase: int="gelu" , _lowerCAmelCase: Optional[int]=0.1 , _lowerCAmelCase: str=0.1 , _lowerCAmelCase: int=514 , _lowerCAmelCase: Tuple=1 , _lowerCAmelCase: Optional[Any]=1e-05 , _lowerCAmelCase: str=1 , _lowerCAmelCase: Optional[int]=0 , _lowerCAmelCase: Dict=2 , _lowerCAmelCase: int="absolute" , _lowerCAmelCase: Tuple=True , **_lowerCAmelCase: Tuple , ) -> Optional[int]: '''simple docstring''' super().__init__(**_lowerCAmelCase ) UpperCAmelCase_ =vocab_size UpperCAmelCase_ =hidden_size UpperCAmelCase_ =num_hidden_layers UpperCAmelCase_ =num_attention_heads UpperCAmelCase_ =hidden_act UpperCAmelCase_ =initializer_factor UpperCAmelCase_ =intermediate_size UpperCAmelCase_ =hidden_dropout_prob UpperCAmelCase_ =attention_probs_dropout_prob UpperCAmelCase_ =max_position_embeddings UpperCAmelCase_ =type_vocab_size UpperCAmelCase_ =layer_norm_eps UpperCAmelCase_ =position_embedding_type UpperCAmelCase_ =use_cache UpperCAmelCase_ =pad_token_id UpperCAmelCase_ =bos_token_id UpperCAmelCase_ =eos_token_id @classmethod def lowerCAmelCase__ ( cls: Dict , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: str ) -> "PretrainedConfig": '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) if config_dict.get("model_type" ) == "bridgetower": UpperCAmelCase_ =config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class A ( __lowercase ): _snake_case ='''bridgetower''' def __init__( self: List[Any] , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Tuple=768 , _lowerCAmelCase: Optional[Any]=1 , _lowerCAmelCase: Optional[int]=1e-05 , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: Union[str, Any]="add" , _lowerCAmelCase: int=12 , _lowerCAmelCase: Optional[Any]=6 , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: Optional[Any]=None , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: Dict , ) -> List[str]: '''simple docstring''' UpperCAmelCase_ =kwargs.pop("text_config_dict" , _lowerCAmelCase ) UpperCAmelCase_ =kwargs.pop("vision_config_dict" , _lowerCAmelCase ) super().__init__(**_lowerCAmelCase ) UpperCAmelCase_ =share_cross_modal_transformer_layers UpperCAmelCase_ =hidden_act UpperCAmelCase_ =hidden_size UpperCAmelCase_ =initializer_factor UpperCAmelCase_ =layer_norm_eps UpperCAmelCase_ =share_link_tower_layers UpperCAmelCase_ =link_tower_type UpperCAmelCase_ =num_attention_heads UpperCAmelCase_ =num_hidden_layers UpperCAmelCase_ =tie_word_embeddings UpperCAmelCase_ =init_layernorm_from_vision_encoder if text_config is None: UpperCAmelCase_ ={} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." ) if vision_config is None: UpperCAmelCase_ ={} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." ) UpperCAmelCase_ =BridgeTowerTextConfig(**_lowerCAmelCase ) UpperCAmelCase_ =BridgeTowerVisionConfig(**_lowerCAmelCase ) @classmethod def lowerCAmelCase__ ( cls: Optional[int] , _lowerCAmelCase: BridgeTowerTextConfig , _lowerCAmelCase: BridgeTowerVisionConfig , **_lowerCAmelCase: int ) -> List[Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase ) def lowerCAmelCase__ ( self: List[str] ) -> Any: '''simple docstring''' UpperCAmelCase_ =copy.deepcopy(self.__dict__ ) UpperCAmelCase_ =self.text_config.to_dict() UpperCAmelCase_ =self.vision_config.to_dict() UpperCAmelCase_ =self.__class__.model_type return output
54
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , """sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE :Tuple = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[int] = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
55
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
681
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Union[str, Any] = logging.get_logger(__name__) _a : Any = {"vocab_file": "sentencepiece.bpe.model"} _a : List[Any] = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, } _a : Union[str, Any] = { "moussaKam/mbarthez": 1_024, "moussaKam/barthez": 1_024, "moussaKam/barthez-orangesum-title": 1_024, } _a : Dict = "▁" class _lowercase ( __lowercase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[str] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : Any="<pad>" , SCREAMING_SNAKE_CASE_ : str="<mask>" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , ) __snake_case = vocab_file __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) __snake_case = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __snake_case = len(self.sp_model ) - 1 __snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case = [self.cls_token_id] __snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def a ( self : List[str] ) -> Dict: return len(self.sp_model ) def a ( self : Optional[int] ) -> List[str]: __snake_case = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ ) def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __snake_case = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) return spm_id if spm_id else self.unk_token_id def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ ) def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: __snake_case = [] __snake_case = '' __snake_case = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token __snake_case = True __snake_case = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE_ ) __snake_case = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) return out_string.strip() def __getstate__( self : List[str] ) -> Tuple: __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: __snake_case = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __snake_case = {} __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __snake_case = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as fi: __snake_case = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
56
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Optional[int] = """decision_transformer""" _UpperCAmelCase : str = ["""past_key_values"""] _UpperCAmelCase : Any = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ): lowerCamelCase : Optional[int] = state_dim lowerCamelCase : int = act_dim lowerCamelCase : int = hidden_size lowerCamelCase : Union[str, Any] = max_ep_len lowerCamelCase : Optional[int] = action_tanh lowerCamelCase : Any = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : List[Any] = n_layer lowerCamelCase : Dict = n_head lowerCamelCase : Optional[Any] = n_inner lowerCamelCase : Tuple = activation_function lowerCamelCase : Tuple = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Dict = attn_pdrop lowerCamelCase : Tuple = layer_norm_epsilon lowerCamelCase : Tuple = initializer_range lowerCamelCase : Tuple = scale_attn_weights lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCamelCase : List[str] = reorder_and_upcast_attn lowerCamelCase : Optional[Any] = bos_token_id lowerCamelCase : str = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
681
0
import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: # Initialise PyTorch model UpperCamelCase_: Union[str, Any] = BigBirdConfig.from_json_file(UpperCAmelCase__ ) print(F'''Building PyTorch model from configuration: {config}''' ) if is_trivia_qa: UpperCamelCase_: List[Any] = BigBirdForQuestionAnswering(UpperCAmelCase__ ) else: UpperCamelCase_: str = BigBirdForPreTraining(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(UpperCAmelCase__ , UpperCAmelCase__ , is_trivia_qa=UpperCAmelCase__ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--big_bird_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.' ) A_ : List[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
57
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
681
0
"""simple docstring""" from __future__ import annotations __lowerCAmelCase : List[Any] = 10 def __lowerCAmelCase ( __UpperCamelCase : list[int] ): '''simple docstring''' snake_case_ : Optional[Any] = 1 snake_case_ : Any = max(__UpperCamelCase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ : list[list] = [[] for _ in range(__UpperCamelCase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ : str = int((i / placement) % RADIX ) buckets[tmp].append(__UpperCamelCase ) # put each buckets' contents into list_of_ints snake_case_ : Optional[int] = 0 for b in range(__UpperCamelCase ): for i in buckets[b]: snake_case_ : str = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
58
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
681
0
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' lowerCamelCase__: Any =tempfile.mkdtemp() lowerCamelCase__: Any =8 # DPR tok lowerCamelCase__: Tuple =[ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCamelCase__: int =os.path.join(self.tmpdirname , "dpr_tokenizer") os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok lowerCamelCase__: str =[ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) lowerCamelCase__: Tuple =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCamelCase__: Any ={"unk_token": "<unk>"} lowerCamelCase__: str =os.path.join(self.tmpdirname , "bart_tokenizer") os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_) lowerCamelCase__: Dict =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"]) lowerCamelCase__: List[str] =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as fp: fp.write(json.dumps(UpperCAmelCase_) + "\n") with open(self.merges_file , "w" , encoding="utf-8") as fp: fp.write("\n".join(UpperCAmelCase_)) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer")) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->DPRContextEncoderTokenizer: '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer")) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->BartTokenizer: '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer")) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple: '''simple docstring''' lowerCamelCase__: Union[str, Any] =Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], }) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.get_dummy_dataset() lowerCamelCase__: int =RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: lowerCamelCase__: int =dataset lowerCamelCase__: str =RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : bool) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: int =self.get_dummy_dataset() lowerCamelCase__: int =RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , "dataset") lowerCamelCase__: Tuple =os.path.join(self.tmpdirname , "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname , "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset")) del dataset lowerCamelCase__: Dict =RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCamelCase__: Tuple =RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_) , ) return retriever def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] =Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)], }) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT) lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index") dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr") pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb")) lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl") lowerCamelCase__: str ={sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , "wb")) lowerCamelCase__: Dict =RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowerCamelCase__: Any =RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer()) return retriever def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] =1 lowerCamelCase__: List[Any] =self.get_dummy_canonical_hf_index_retriever() lowerCamelCase__: Union[str, Any] =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any: '''simple docstring''' lowerCamelCase__: str =self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: lowerCamelCase__: List[Any] =self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_) lowerCamelCase__: List[str] =RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Optional[Any] =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: Optional[int] =1 lowerCamelCase__: Optional[Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) lowerCamelCase__: Any =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) lowerCamelCase__: List[Any] =RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =1 lowerCamelCase__: Dict =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) lowerCamelCase__: str =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[str] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[Any] =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Any =1 lowerCamelCase__: Optional[int] =self.get_dummy_legacy_index_retriever() lowerCamelCase__: Any =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(UpperCAmelCase_) , 2) self.assertEqual(sorted(doc_dicts[0]) , ["text", "title"]) self.assertEqual(len(doc_dicts[0]["text"]) , UpperCAmelCase_) self.assertEqual(doc_dicts[0]["text"][0] , "bar") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]]) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]: '''simple docstring''' lowerCamelCase__: str =self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_) lowerCamelCase__: Optional[int] =RagRetriever.from_pretrained(UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: Tuple =retriever.retrieve(UpperCAmelCase_ , n_docs=1) self.assertTrue(out is not None) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' import torch lowerCamelCase__: Union[str, Any] =1 lowerCamelCase__: Optional[Any] =self.get_dummy_canonical_hf_index_retriever() lowerCamelCase__: str =[[5, 7], [10, 11]] lowerCamelCase__: List[str] =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: Optional[int] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_) self.assertIsInstance(UpperCAmelCase_ , np.ndarray) lowerCamelCase__: Optional[Any] =retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors="pt" , ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size)) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE_ (self : Any) ->Any: '''simple docstring''' lowerCamelCase__: Any =self.get_dpr_ctx_encoder_tokenizer() lowerCamelCase__: Dict =1 lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_) lowerCamelCase__: List[Any] =[[5, 7], [10, 11]] lowerCamelCase__: Any =np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa) lowerCamelCase__: List[Any] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_) self.assertEqual( len(UpperCAmelCase_) , 6) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")) , UpperCAmelCase_) # check for doc token related keys in dictionary.
59
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
681
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
60
from __future__ import annotations def _a ( lowerCamelCase ): lowerCamelCase : Union[str, Any] = str(lowerCamelCase ) return n == n[::-1] def _a ( lowerCamelCase = 100_0000 ): lowerCamelCase : Any = 0 for i in range(1, lowerCamelCase ): if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
681
0
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : List[Any]=30 , SCREAMING_SNAKE_CASE__ : Optional[Any]=400 , SCREAMING_SNAKE_CASE__ : Any=3 , ) -> Tuple: lowerCAmelCase__ = parent lowerCAmelCase__ = do_resize lowerCAmelCase__ = size if size is not None else {"shortest_edge": 288} lowerCAmelCase__ = size_divisor lowerCAmelCase__ = do_rescale lowerCAmelCase__ = rescale_factor lowerCAmelCase__ = do_normalize lowerCAmelCase__ = do_center_crop lowerCAmelCase__ = image_mean lowerCAmelCase__ = image_std lowerCAmelCase__ = do_pad lowerCAmelCase__ = batch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = min_resolution lowerCAmelCase__ = max_resolution def a ( self : str ) -> Tuple: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any=False ) -> Optional[Any]: if not batched: lowerCAmelCase__ = self.size["shortest_edge"] lowerCAmelCase__ = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ): lowerCAmelCase__ , lowerCAmelCase__ = image.size else: lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2] lowerCAmelCase__ = size / min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if h < w: lowerCAmelCase__ , lowerCAmelCase__ = size, scale * w else: lowerCAmelCase__ , lowerCAmelCase__ = scale * h, size lowerCAmelCase__ = int((1_333 / 800) * size ) if max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) > max_size: lowerCAmelCase__ = max_size / max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = newh * scale lowerCAmelCase__ = neww * scale lowerCAmelCase__ , lowerCAmelCase__ = int(newh + 0.5 ), int(neww + 0.5 ) lowerCAmelCase__ , lowerCAmelCase__ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCAmelCase__ = [] for image in image_inputs: lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase__ = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0] lowerCAmelCase__ = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = BridgeTowerImageProcessor if is_vision_available() else None def a ( self : Tuple ) -> int: lowerCAmelCase__ = BridgeTowerImageProcessingTester(self ) @property def a ( self : str ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a ( self : List[Any] ) -> Dict: lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size_divisor" ) ) def a ( self : Any ) -> Tuple: pass def a ( self : Tuple ) -> Any: # Initialize image processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a ( self : Optional[Any] ) -> List[Any]: # Initialize image processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a ( self : Optional[Any] ) -> Tuple: # Initialize image processor lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
61
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
681
0
def lowerCamelCase__ ( lowercase = 600851475143 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowercase ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : Optional[int] = 2 while i * i <= n: while n % i == 0: SCREAMING_SNAKE_CASE : Tuple = i n //= i i += 1 if n > 1: SCREAMING_SNAKE_CASE : int = n return int(lowercase ) if __name__ == "__main__": print(F"""{solution() = }""")
62
def _a ( lowerCamelCase ): if num < 0: return False lowerCamelCase : int = num lowerCamelCase : int = 0 while num > 0: lowerCamelCase : str = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
681
0
from ...configuration_utils import PretrainedConfig from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : int = { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class a ( lowercase__ ): """simple docstring""" a : Dict = 'speech_to_text_2' a : Optional[int] = ['past_key_values'] a : Union[str, Any] = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[str] , __lowercase : Union[str, Any]=10000 , __lowercase : List[Any]=6 , __lowercase : Tuple=2048 , __lowercase : int=4 , __lowercase : Dict=0.0 , __lowercase : int=True , __lowercase : Optional[int]="relu" , __lowercase : Optional[int]=256 , __lowercase : int=0.1 , __lowercase : Optional[int]=0.0 , __lowercase : Optional[Any]=0.0 , __lowercase : Union[str, Any]=0.02 , __lowercase : Any=2 , __lowercase : List[Any]=True , __lowercase : Tuple=1 , __lowercase : str=0 , __lowercase : Tuple=2 , __lowercase : List[Any]=1024 , **__lowercase : Tuple , ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : int = d_model __UpperCAmelCase : Any = decoder_ffn_dim __UpperCAmelCase : List[Any] = decoder_layers __UpperCAmelCase : Union[str, Any] = decoder_attention_heads __UpperCAmelCase : List[str] = dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : Tuple = activation_dropout __UpperCAmelCase : List[str] = activation_function __UpperCAmelCase : Any = init_std __UpperCAmelCase : str = decoder_layerdrop __UpperCAmelCase : Tuple = use_cache __UpperCAmelCase : Dict = decoder_layers __UpperCAmelCase : str = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCAmelCase : str = max_target_positions super().__init__( pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , **__lowercase , )
63
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
681
0
def A__ ( snake_case_ : str , snake_case_ : str ): SCREAMING_SNAKE_CASE__: Optional[Any]= len(snake_case_ ) SCREAMING_SNAKE_CASE__: Optional[int]= [] for i in range(len(snake_case_ ) - pat_len + 1 ): SCREAMING_SNAKE_CASE__: List[str]= True for j in range(snake_case_ ): if s[i + j] != pattern[j]: SCREAMING_SNAKE_CASE__: List[Any]= False break if match_found: position.append(snake_case_ ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
64
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
"""simple docstring""" import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = WavaVecaPhonemeCTCTokenizer snake_case_ = False def __lowercase ( self : List[str] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Tuple = ( """<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """ """ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """ """ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """ """oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """ """pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """ """yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """ """əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """ """ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """ """ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """ """uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """ """ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """ """ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """ """ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4""" ).split(""" """ ) UpperCAmelCase__ : Dict = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase__ : List[Any] = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""} UpperCAmelCase__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) def __lowercase ( self : Optional[Any] ,A : Dict ,A : Any=False ,A : List[str]=20 ,A : Tuple=5 ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [(i, tokenizer.decode([i] ,clean_up_tokenization_spaces=A )) for i in range(len(A ) )] UpperCAmelCase__ : Any = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1] ,do_phonemize=A ) ,A ) ) if max_length is not None and len(A ) > max_length: UpperCAmelCase__ : Union[str, Any] = toks[:max_length] if min_length is not None and len(A ) < min_length and len(A ) > 0: while len(A ) < min_length: UpperCAmelCase__ : List[str] = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase__ : Any = [t[0] for t in toks] # Ensure consistency UpperCAmelCase__ : Union[str, Any] = tokenizer.decode(A ,clean_up_tokenization_spaces=A ) if " " not in output_txt and len(A ) > 1: UpperCAmelCase__ : Optional[Any] = ( tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=A ) + """ """ + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=A ) ) if with_prefix_space: UpperCAmelCase__ : Dict = """ """ + output_txt UpperCAmelCase__ : Any = tokenizer.encode(A ,add_special_tokens=A ) return output_txt, output_ids def __lowercase ( self : str ,**A : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname ,**A ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) # check adding a single token tokenizer.add_tokens("""xxx""" ) UpperCAmelCase__ : Optional[int] = tokenizer("""m xxx ɪ""" ,do_phonemize=A ).input_ids self.assertEqual(A ,[13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] ) UpperCAmelCase__ : Optional[Any] = tokenizer("""m aaa ɪ ccc""" ,do_phonemize=A ).input_ids self.assertEqual(A ,[13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa UpperCAmelCase__ : Union[str, Any] = tokenizer("""maɪ c""" ,do_phonemize=A ).input_ids self.assertEqual(A ,[3, 200] ) # mai should be <unk> (=3) def __lowercase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) UpperCAmelCase__ : int = """Hello how are you""" UpperCAmelCase__ : Any = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) self.assertEqual(A ,"""h ə l oʊ h aʊ ɑːɹ j uː""" ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) UpperCAmelCase__ : List[Any] = """Hello how are you""" UpperCAmelCase__ : Dict = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) self.assertEqual(tokenizer(A ).input_ids ,tokenizer(A ,do_phonemize=A ).input_ids ) def __lowercase ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) UpperCAmelCase__ : Optional[int] = """Hello how are you""" UpperCAmelCase__ : Optional[int] = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) UpperCAmelCase__ : List[str] = tokenizer.decode(tokenizer(A ).input_ids ) self.assertEqual(A ,A ) def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) UpperCAmelCase__ : List[str] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] UpperCAmelCase__ : Optional[Any] = tokenizer.decode(sample_ids[0] ) UpperCAmelCase__ : str = tokenizer.batch_decode(A ) self.assertEqual(A ,batch_tokens[0] ) self.assertEqual(A ,["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) UpperCAmelCase__ : Optional[Any] = """Hello how are you""" UpperCAmelCase__ : Optional[Any] = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) self.assertEqual(A ,"""h ə l oʊ | h aʊ | ɑːɹ | j uː |""" ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) UpperCAmelCase__ : Tuple = """Hello how are you""" UpperCAmelCase__ : Tuple = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) self.assertEqual(tokenizer(A ).input_ids ,tokenizer(A ,do_phonemize=A ).input_ids ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) # fmt: off UpperCAmelCase__ : Any = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter UpperCAmelCase__ : List[Any] = tokenizer.decode(sample_ids[0] ) UpperCAmelCase__ : str = tokenizer.batch_decode(A ) self.assertEqual(A ,batch_tokens[0] ) self.assertEqual(A ,["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] ) # decode with no word_del_token filter UpperCAmelCase__ : List[str] = tokenizer.decode(sample_ids[0] ,filter_word_delimiter_token=A ) UpperCAmelCase__ : int = tokenizer.batch_decode(A ,filter_word_delimiter_token=A ) self.assertEqual(A ,batch_tokens[0] ) self.assertEqual(A ,["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) UpperCAmelCase__ : List[str] = """Hello how are you""" UpperCAmelCase__ : Any = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) UpperCAmelCase__ : str = tokenizer.decode(tokenizer(A ).input_ids ,filter_word_delimiter_token=A ) self.assertEqual(A ,A ) def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) UpperCAmelCase__ : Tuple = """Hello how are you""" UpperCAmelCase__ : str = tokenizer.phonemize(A ,phonemizer_lang="""en-us""" ) UpperCAmelCase__ : Optional[Any] = tokenizer.decode(tokenizer(A ).input_ids ,filter_word_delimiter_token=A ) self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() ,A ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained( """facebook/wav2vec2-lv-60-espeak-cv-ft""" ,word_delimiter_token=A ) UpperCAmelCase__ : Optional[int] = """Hello how are you""" UpperCAmelCase__ : int = tokenizer(A ,phonemizer_lang="""en-us""" ).input_ids UpperCAmelCase__ : Optional[int] = tokenizer(A ,phonemizer_lang="""fr-fr""" ).input_ids self.assertNotEqual(A ,A ) UpperCAmelCase__ : List[Any] = tokenizer.decode(A ) UpperCAmelCase__ : Any = tokenizer.decode(A ) self.assertEqual(A ,"""h ə l oʊ h aʊ ɑːɹ j uː""" ) self.assertEqual(A ,"""ɛ l o h aʊ a ʁ j u""" ) def __lowercase ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) UpperCAmelCase__ : Dict = """Hello how Are you""" UpperCAmelCase__ : List[str] = """hello how are you""" UpperCAmelCase__ : Tuple = tokenizer(A ).input_ids UpperCAmelCase__ : Any = tokenizer(A ).input_ids self.assertEqual(A ,A ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" ) tokenizer.add_tokens(["""!""", """?"""] ) tokenizer.add_special_tokens({"""cls_token""": """$$$"""} ) # fmt: off UpperCAmelCase__ : Union[str, Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on UpperCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(A ) self.assertEqual(A ,["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] ) @staticmethod def __lowercase ( A : Tuple ,A : str ): '''simple docstring''' UpperCAmelCase__ : Any = [d[key] for d in offsets] return retrieved_list def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer(word_delimiter_token="""|""" ) tokenizer.add_tokens("""|""" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" UpperCAmelCase__ : Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on UpperCAmelCase__ : Optional[Any] = tokenizer.decode(A ,output_char_offsets=A ,filter_word_delimiter_token=A ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) ,2 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""char_offsets""" in outputs ) self.assertTrue(isinstance(A ,A ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] ,"""char""" ) ) ,outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] ,"""char""" ) ,["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] ,"""start_offset""" ) ,[0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["""char_offsets"""] ,"""end_offset""" ) ,[1, 4, 6, 9, 10, 12, 15, 16, 17] ) def __lowercase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_tokenizer(word_delimiter_token="""|""" ) def check_list_tuples_equal(A : Union[str, Any] ,A : List[str] ): self.assertTrue(isinstance(A ,A ) ) self.assertTrue(isinstance(outputs_list[0] ,A ) ) # transform list to ModelOutput UpperCAmelCase__ : int = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["""text"""] ,outputs_batch_a["""text"""] ) def recursive_check(A : int ,A : Union[str, Any] ): if isinstance(A ,A ): [recursive_check(A ,A ) for la, la in zip(A ,A )] self.assertEqual(A ,A ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["""char_offsets"""] ,outputs_batch_a["""char_offsets"""] ) # fmt: off UpperCAmelCase__ : List[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char UpperCAmelCase__ : int = tokenizer.batch_decode(A ,output_char_offsets=A ) UpperCAmelCase__ : int = [tokenizer.decode(A ,output_char_offsets=A ) for ids in sample_ids] check_list_tuples_equal(A ,A ) @unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" ) def __lowercase ( self : List[str] ): '''simple docstring''' pass @unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" ) def __lowercase ( self : List[str] ): '''simple docstring''' pass @unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" ) def __lowercase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" ) def __lowercase ( self : Tuple ): '''simple docstring''' pass def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizers(do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase__ : Tuple = tokenizer.vocab_size UpperCAmelCase__ : List[Any] = len(A ) self.assertNotEqual(A ,0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCAmelCase__ : Dict = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] UpperCAmelCase__ : Optional[int] = tokenizer.add_tokens(A ) UpperCAmelCase__ : int = tokenizer.vocab_size UpperCAmelCase__ : Dict = len(A ) self.assertNotEqual(A ,0 ) self.assertEqual(A ,A ) self.assertEqual(A ,len(A ) ) self.assertEqual(A ,all_size + len(A ) ) UpperCAmelCase__ : List[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" ,add_special_tokens=A ) self.assertGreaterEqual(len(A ) ,4 ) self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 ) UpperCAmelCase__ : List[Any] = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} UpperCAmelCase__ : List[str] = tokenizer.add_special_tokens(A ) UpperCAmelCase__ : Optional[int] = tokenizer.vocab_size UpperCAmelCase__ : Union[str, Any] = len(A ) self.assertNotEqual(A ,0 ) self.assertEqual(A ,A ) self.assertEqual(A ,len(A ) ) self.assertEqual(A ,all_size_a + len(A ) ) UpperCAmelCase__ : Union[str, Any] = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" ,add_special_tokens=A ) self.assertGreaterEqual(len(A ) ,6 ) self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] ,tokens[1] ) self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] ,tokens[-4] ) self.assertEqual(tokens[0] ,tokenizer.eos_token_id ) self.assertEqual(tokens[-3] ,tokenizer.pad_token_id ) @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" ) def __lowercase ( self : Tuple ): '''simple docstring''' pass @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" ) def __lowercase ( self : Optional[int] ): '''simple docstring''' pass def __lowercase ( self : int ): '''simple docstring''' # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. UpperCAmelCase__ : List[str] = self.get_tokenizers(fast=A ,do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase__ : str = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""] UpperCAmelCase__ : int = tokenizer.convert_tokens_to_string(A ) self.assertIsInstance(output["""text"""] ,A )
65
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
681
0
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging snake_case = logging.get_logger(__name__) snake_case = { """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class A_ ( UpperCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = '''perceiver''' def __init__( self : Optional[Any] ,__A : int=256 ,__A : List[Any]=1280 ,__A : List[Any]=768 ,__A : List[str]=1 ,__A : Dict=26 ,__A : Union[str, Any]=8 ,__A : Union[str, Any]=8 ,__A : List[Any]=None ,__A : str=None ,__A : str="kv" ,__A : Union[str, Any]=1 ,__A : Any=1 ,__A : Any="gelu" ,__A : Optional[Any]=0.1 ,__A : Dict=0.02 ,__A : Union[str, Any]=1e-12 ,__A : List[Any]=True ,__A : List[Any]=262 ,__A : Any=2048 ,__A : str=56 ,__A : Tuple=[368, 496] ,__A : Union[str, Any]=16 ,__A : List[str]=1920 ,__A : List[Any]=16 ,__A : str=[1, 16, 224, 224] ,**__A : Tuple ,) -> int: super().__init__(**__A ) _lowercase = num_latents _lowercase = d_latents _lowercase = d_model _lowercase = num_blocks _lowercase = num_self_attends_per_block _lowercase = num_self_attention_heads _lowercase = num_cross_attention_heads _lowercase = qk_channels _lowercase = v_channels _lowercase = cross_attention_shape_for_attention _lowercase = self_attention_widening_factor _lowercase = cross_attention_widening_factor _lowercase = hidden_act _lowercase = attention_probs_dropout_prob _lowercase = initializer_range _lowercase = layer_norm_eps _lowercase = use_query_residual # masked language modeling attributes _lowercase = vocab_size _lowercase = max_position_embeddings # image classification attributes _lowercase = image_size # flow attributes _lowercase = train_size # multimodal autoencoding attributes _lowercase = num_frames _lowercase = audio_samples_per_frame _lowercase = samples_per_patch _lowercase = output_shape class A_ ( UpperCAmelCase ): """simple docstring""" @property def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def __UpperCAmelCase ( self : int ) -> float: return 1e-4 def __UpperCAmelCase ( self : Union[str, Any] ,__A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,__A : int = -1 ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,__A : int = 3 ,__A : int = 40 ,__A : int = 40 ,) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(__A ,__A ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowercase = compute_effective_axis_dimension( __A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _lowercase = preprocessor.num_special_tokens_to_add(__A ) _lowercase = compute_effective_axis_dimension( __A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A ) # Generate dummy inputs according to compute batch and sequence _lowercase = [' '.join(['a'] ) * seq_length] * batch_size _lowercase = dict(preprocessor(__A ,return_tensors=__A ) ) _lowercase = inputs.pop('input_ids' ) return inputs elif isinstance(__A ,__A ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _lowercase = compute_effective_axis_dimension(__A ,fixed_dimension=OnnxConfig.default_fixed_batch ) _lowercase = self._generate_dummy_images(__A ,__A ,__A ,__A ) _lowercase = dict(preprocessor(images=__A ,return_tensors=__A ) ) _lowercase = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
67
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): # Initialise PyTorch model lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
681
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
68
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
681
0
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): def __init__( self : int , a_ : str="" , a_ : List[Any]="train" ): """simple docstring""" assert os.path.isdir(a_ ) __snake_case = [] __snake_case = os.listdir(a_ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __snake_case = os.path.join(a_ , a_ ) if not os.path.isfile(a_ ): continue self.documents.append(a_ ) def __len__( self : int ): """simple docstring""" return len(self.documents ) def __getitem__( self : List[Any] , a_ : Tuple ): """simple docstring""" __snake_case = self.documents[idx] __snake_case = document_path.split("/" )[-1] with open(a_ , encoding="utf-8" ) as source: __snake_case = source.read() __snake_case , __snake_case = process_story(a_ ) return document_name, story_lines, summary_lines def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> List[str]: __snake_case = list(filter(lambda _UpperCAmelCase : len(_UpperCAmelCase ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it __snake_case = [_add_missing_period(_UpperCAmelCase ) for line in nonempty_lines] # gather article lines __snake_case = [] __snake_case = deque(_UpperCAmelCase ) while True: try: __snake_case = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(_UpperCAmelCase ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __snake_case = list(filter(lambda _UpperCAmelCase : not t.startswith("@highlight" ) , _UpperCAmelCase ) ) return story_lines, summary_lines def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> Tuple: __snake_case = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Dict: if len(_UpperCAmelCase ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(_UpperCAmelCase )) ) return sequence def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Dict: __snake_case = torch.ones_like(_UpperCAmelCase ) __snake_case = sequence == pad_token_id __snake_case = 0 return mask def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> str: __snake_case = [tokenizer.encode(_UpperCAmelCase ) for line in story_lines] __snake_case = [token for sentence in story_lines_token_ids for token in sentence] __snake_case = [tokenizer.encode(_UpperCAmelCase ) for line in summary_lines] __snake_case = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]: __snake_case = [] for sequence in batch: __snake_case = -1 __snake_case = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(_UpperCAmelCase ) return torch.tensor(_UpperCAmelCase )
69
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
681
0
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ProphetNetTokenizer UpperCamelCase = False def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" super().setUp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def a__ ( self : Union[str, Any] , A_ : str ) -> List[Any]: """simple docstring""" lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = 'unwanted, running' return input_text, output_text def a__ ( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] ) def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCamelCase_ = {} for i, token in enumerate(A_ ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowerCamelCase_ = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] lowerCamelCase_ = tokenizer(A_ , padding=A_ , return_tensors='pt' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(A_ , A_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a__ ( self : int ) -> str: """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def a__ ( self : int ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
70
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[str] = k_size // 2 lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) ) return g def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase : Dict = height - k_size + 1 lowerCamelCase : str = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase : List[Any] = 0 for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ): lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase ) lowerCamelCase : str = ravel(lowerCamelCase ) # reshape and get the dst image lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase ) return dst if __name__ == "__main__": # read original image _lowerCamelCase =imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _lowerCamelCase =gaussian_filter(gray, 3, sigma=1) _lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
681
0
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Dict: """simple docstring""" def is_in_circle(_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool: UpperCAmelCase_ : Dict = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle UpperCAmelCase_ : List[Any] = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) # The ratio of the area for circle to square is pi/4. UpperCAmelCase_ : Optional[int] = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 , ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value) def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> None: """simple docstring""" def identity_function(_SCREAMING_SNAKE_CASE : float ) -> float: return x UpperCAmelCase_ : Optional[Any] = area_under_curve_estimator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print("******************" ) def a__ ( _SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" def function_to_integrate(_SCREAMING_SNAKE_CASE : float ) -> float: return sqrt(4.0 - x * x ) UpperCAmelCase_ : List[Any] = area_under_curve_estimator( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
71
import pytest _lowerCamelCase ="""__dummy_dataset1__""" _lowerCamelCase =""" import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Union[str, Any] = dataset_loading_script_name lowerCamelCase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowerCamelCase : str = script_dir / F'''{script_name}.py''' with open(lowerCamelCase, """w""" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
681
0
'''simple docstring''' import os def UpperCamelCase ( ) -> str: '''simple docstring''' with open(os.path.dirname(lowercase_ ) + '''/grid.txt''' ) as f: lowercase =[] # noqa: E741 for _ in range(2_0 ): l.append([int(lowercase_ ) for x in f.readline().split()] ) lowercase =0 # right for i in range(2_0 ): for j in range(1_7 ): lowercase =l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowercase =temp # down for i in range(1_7 ): for j in range(2_0 ): lowercase =l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowercase =temp # diagonal 1 for i in range(1_7 ): for j in range(1_7 ): lowercase =l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowercase =temp # diagonal 2 for i in range(1_7 ): for j in range(3 , 2_0 ): lowercase =l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowercase =temp return maximum if __name__ == "__main__": print(solution())
72
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): _lowerCamelCase ={ """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: _lowerCamelCase ={ """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _a ( lowerCamelCase ): lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 ) lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase : Any = numpy_to_pil(lowerCamelCase ) return images def _a ( lowerCamelCase ): if images.ndim == 3: lowerCamelCase : Optional[Any] = images[None, ...] lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images] else: lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
681
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : List[str] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _snake_case ( A__ , unittest.TestCase ): _lowercase : Tuple = XLNetTokenizer _lowercase : List[Any] = XLNetTokenizerFast _lowercase : int = True _lowercase : Any = True def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE = XLNetTokenizer(a , keep_accents=a) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = '<s>' SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , '<eod>') self.assertEqual(len(a) , 1006) def SCREAMING_SNAKE_CASE__ ( self) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 1000) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: SCREAMING_SNAKE_CASE = XLNetTokenizer(a , keep_accents=a) SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test') self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382]) SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a) self.assertListEqual(a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = XLNetTokenizer(a , do_lower_case=a) SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( a , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o']) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = XLNetTokenizer(a , do_lower_case=a) SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained('xlnet-base-cased') SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=a) SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=a) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(a) SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(a , a) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: # fmt: off SCREAMING_SNAKE_CASE = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
73
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
681
0
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowercase_ = """pt""" elif is_tf_available(): lowercase_ = """tf""" else: lowercase_ = """jax""" class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ByTaTokenizer lowerCAmelCase_ = False def UpperCAmelCase__ ( self : int ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : Any = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return ByTaTokenizer.from_pretrained('''google/byt5-small''' ) def UpperCAmelCase__ ( self : Tuple , **_A : Any ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : str , _A : Optional[Any] , _A : Union[str, Any]=False , _A : Optional[int]=20 , _A : Optional[Any]=5 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [] for i in range(len(_A ) ): try: __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __SCREAMING_SNAKE_CASE : Dict = list(filter(lambda _A : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , _A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __SCREAMING_SNAKE_CASE : str = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __SCREAMING_SNAKE_CASE : List[Any] = toks + toks # toks_str = [t[1] for t in toks] __SCREAMING_SNAKE_CASE : Union[str, Any] = [t[0] for t in toks] # Ensure consistency __SCREAMING_SNAKE_CASE : int = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __SCREAMING_SNAKE_CASE : str = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __SCREAMING_SNAKE_CASE : str = ''' ''' + output_txt __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(['''hi''', '''I went to the gym''', ''''''] ) self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : Tuple = '''Unicode €.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['''input_ids'''] , _A ) # decoding __SCREAMING_SNAKE_CASE : Any = tokenizer.decode(_A ) self.assertEqual(_A , '''Unicode €.</s>''' ) __SCREAMING_SNAKE_CASE : Dict = tokenizer('''e è é ê ë''' ) __SCREAMING_SNAKE_CASE : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['''input_ids'''] , _A ) # decoding __SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(_A ) self.assertEqual(_A , '''e è é ê ë</s>''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off __SCREAMING_SNAKE_CASE : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on __SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __SCREAMING_SNAKE_CASE : int = list(batch.input_ids.numpy()[0] ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertNotIn('''decoder_input_ids''' , _A ) self.assertNotIn('''decoder_attention_mask''' , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : List[str] = [ '''Summary of the text.''', '''Another summary.''', ] __SCREAMING_SNAKE_CASE : List[str] = tokenizer( text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.ta_base_tokenizer __SCREAMING_SNAKE_CASE : int = ['''A long paragraph for summarization. </s>'''] __SCREAMING_SNAKE_CASE : Any = ['''Summary of the text. </s>'''] # fmt: off __SCREAMING_SNAKE_CASE : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] __SCREAMING_SNAKE_CASE : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on __SCREAMING_SNAKE_CASE : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['''input_ids'''][0] ) self.assertEqual(_A , batch['''labels'''][0] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __SCREAMING_SNAKE_CASE : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __SCREAMING_SNAKE_CASE : Dict = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : Optional[int] = ''' He is very happy, UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.__class__.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : int = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : List[Any] = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) __SCREAMING_SNAKE_CASE : int = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : str = tokenizer.__class__.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : List[str] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __SCREAMING_SNAKE_CASE : List[str] = json.load(_A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __SCREAMING_SNAKE_CASE : Any = json.load(_A ) __SCREAMING_SNAKE_CASE : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )] __SCREAMING_SNAKE_CASE : Tuple = added_tokens_extra_ids + [ '''an_additional_special_token''' ] __SCREAMING_SNAKE_CASE : str = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __SCREAMING_SNAKE_CASE : Tuple = tokenizer_class.from_pretrained( _A , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __SCREAMING_SNAKE_CASE : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )] __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '''''' ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" pass def UpperCAmelCase__ ( self : str ): """simple docstring""" pass def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : Optional[int] = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>'''] __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : Dict = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] ) setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
74
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
'''simple docstring''' from torch import nn def a__ ( lowerCAmelCase__ ) -> List[Any]: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
75
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCamelCase =HfArgumentParser(InitializationArguments) _lowerCamelCase =parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCamelCase ={ """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) _lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCamelCase =AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
681
0
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[Any]: super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : List[Any] = field __lowercase : List[str] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} __lowercase : Dict = Json( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , ) def _lowerCamelCase ( self ) -> str: # Build iterable dataset if self.streaming: __lowercase : Any = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __lowercase : List[Any] = None __lowercase : Any = None __lowercase : Union[str, Any] = None __lowercase : Optional[int] = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) __lowercase : str = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> str: if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) __lowercase : Any = dataset __lowercase : Dict = path_or_buf __lowercase : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __lowercase : List[str] = num_proc __lowercase : Optional[Any] = '''utf-8''' __lowercase : Tuple = to_json_kwargs def _lowerCamelCase ( self ) -> int: __lowercase : str = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ ) __lowercase : str = self.to_json_kwargs.pop('''orient''' , '''records''' ) __lowercase : List[Any] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) __lowercase : Any = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) __lowercase : Tuple = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer: __lowercase : List[str] = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. Please provide a local path instead.''' ) __lowercase : Tuple = self._write( file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs ) return written def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : Dict = args __lowercase : Union[str, Any] = query_table( table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __lowercase : Optional[Any] = batch.to_pandas().to_json( path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ) -> int: __lowercase : Dict = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): __lowercase : str = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(UpperCamelCase_ ) else: __lowercase ,__lowercase : Dict = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(UpperCamelCase_ ) return written
76
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , """sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class a__ ( __magic_name__ ): lowercase_ = "Salesforce/blip-image-captioning-base" lowercase_ = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) lowercase_ = "image_captioner" lowercase_ = AutoModelForVisionaSeq lowercase_ = ["image"] lowercase_ = ["text"] def __init__( self : Any , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["vision"]) super().__init__(*UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : List[str] , UpperCamelCase_ : "Image"): """simple docstring""" return self.pre_processor(images=UpperCamelCase_ , return_tensors="pt") def a_ ( self : Optional[Any] , UpperCamelCase_ : Tuple): """simple docstring""" return self.model.generate(**UpperCamelCase_) def a_ ( self : Any , UpperCamelCase_ : Tuple): """simple docstring""" return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)[0].strip()
77
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
681
0
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __A : def __init__(self : List[str] , __a : Union[str, Any] , __a : int=13 , __a : List[Any]=7 , __a : List[Any]=True , __a : Dict=True , __a : Optional[Any]=True , __a : int=True , __a : Dict=99 , __a : str=16 , __a : List[str]=36 , __a : Tuple=6 , __a : List[str]=6 , __a : Tuple=6 , __a : Dict=37 , __a : Dict="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : str=512 , __a : Dict=16 , __a : int=2 , __a : List[str]=0.02 , __a : List[str]=3 , __a : int=4 , __a : Union[str, Any]=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = embedding_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_hidden_groups UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def _lowercase (self : List[str] ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase (self : int ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def _lowercase (self : int , __a : Tuple , __a : Dict , __a : List[Any] , __a : Tuple , __a : int , __a : Tuple , __a : Optional[Any] ): UpperCAmelCase_ = AlbertModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a ) UpperCAmelCase_ = model(__a , token_type_ids=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase (self : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : List[str] , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : Optional[int] ): UpperCAmelCase_ = AlbertForPreTraining(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def _lowercase (self : Optional[int] , __a : Tuple , __a : int , __a : Tuple , __a : Tuple , __a : List[str] , __a : int , __a : Optional[Any] ): UpperCAmelCase_ = AlbertForMaskedLM(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Dict , __a : Optional[int] , __a : Optional[Any] , __a : Dict , __a : Tuple , __a : Optional[Any] , __a : List[str] , __a : List[str] ): UpperCAmelCase_ = AlbertForQuestionAnswering(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase (self : str , __a : List[str] , __a : Optional[Any] , __a : List[str] , __a : List[str] , __a : Tuple , __a : List[Any] , __a : Any ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = AlbertForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase (self : Tuple , __a : int , __a : int , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : Tuple , __a : List[str] ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = AlbertForTokenClassification(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase (self : List[Any] , __a : Union[str, Any] , __a : List[Any] , __a : str , __a : Tuple , __a : Dict , __a : Any , __a : Tuple ): UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = AlbertForMultipleChoice(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) a__ : Any = ( { """feature-extraction""": AlbertModel, """fill-mask""": AlbertForMaskedLM, """question-answering""": AlbertForQuestionAnswering, """text-classification""": AlbertForSequenceClassification, """token-classification""": AlbertForTokenClassification, """zero-shot""": AlbertForSequenceClassification, } if is_torch_available() else {} ) a__ : str = True def _lowercase (self : Tuple , __a : List[Any] , __a : Tuple , __a : str=False ): UpperCAmelCase_ = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class in get_values(__a ): UpperCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a ) UpperCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) return inputs_dict def _lowercase (self : List[str] ): UpperCAmelCase_ = AlbertModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 ) def _lowercase (self : List[Any] ): self.config_tester.run_common_tests() def _lowercase (self : Dict ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ = type self.model_tester.create_and_check_model(*__a ) @slow def _lowercase (self : Tuple ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = AlbertModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch class __A ( unittest.TestCase ): @slow def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ = model(__a , attention_mask=__a )[0] UpperCAmelCase_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __a ) UpperCAmelCase_ = torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
78
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Optional[int] = """decision_transformer""" _UpperCAmelCase : str = ["""past_key_values"""] _UpperCAmelCase : Any = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ): lowerCamelCase : Optional[int] = state_dim lowerCamelCase : int = act_dim lowerCamelCase : int = hidden_size lowerCamelCase : Union[str, Any] = max_ep_len lowerCamelCase : Optional[int] = action_tanh lowerCamelCase : Any = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : List[Any] = n_layer lowerCamelCase : Dict = n_head lowerCamelCase : Optional[Any] = n_inner lowerCamelCase : Tuple = activation_function lowerCamelCase : Tuple = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Dict = attn_pdrop lowerCamelCase : Tuple = layer_norm_epsilon lowerCamelCase : Tuple = initializer_range lowerCamelCase : Tuple = scale_attn_weights lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCamelCase : List[str] = reorder_and_upcast_attn lowerCamelCase : Optional[Any] = bos_token_id lowerCamelCase : str = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
681
0
import os from typing import Dict, List, Tuple, TypeVar, Union SCREAMING_SNAKE_CASE__ : Any = TypeVar("""T""") SCREAMING_SNAKE_CASE__ : List[Any] = Union[List[T], Tuple[T, ...]] SCREAMING_SNAKE_CASE__ : int = Union[T, List[T], Dict[str, T]] SCREAMING_SNAKE_CASE__ : Optional[int] = Union[str, bytes, os.PathLike]
79
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
681
0
class __UpperCamelCase : # Public class to implement a graph def __init__( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> None: """simple docstring""" __lowercase = row __lowercase = col __lowercase = graph def _a ( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> bool: """simple docstring""" return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def _a ( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[list[bool]] ) -> None: """simple docstring""" __lowercase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowercase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowercase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _lowerCAmelCase ) def _a ( self : Dict ) -> int: # And finally, count all islands. """simple docstring""" __lowercase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowercase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) count += 1 return count
80
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
681
0
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin _snake_case : str = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Any = SpeechTaTokenizer __UpperCAmelCase : Any = False __UpperCAmelCase : str = True def __snake_case ( self : Optional[Any] ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing __snake_case : Tuple = SpeechTaTokenizer(lowerCamelCase ) __snake_case : Tuple = AddedToken("<mask>" , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) __snake_case : Optional[Any] = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self : Tuple , lowerCamelCase : Optional[int] ) -> Union[str, Any]: __snake_case : Any = "this is a test" __snake_case : Tuple = "this is a test" return input_text, output_text def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=20 , lowerCamelCase : List[Any]=5 ) -> Optional[int]: __snake_case , __snake_case : int = self.get_input_output_texts(lowerCamelCase ) __snake_case : str = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) __snake_case : Optional[Any] = tokenizer.decode(lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase ) return text, ids def __snake_case ( self : Any ) -> List[str]: __snake_case : Tuple = "<pad>" __snake_case : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase ) def __snake_case ( self : Optional[int] ) -> Optional[int]: __snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(lowerCamelCase ) , 81 ) def __snake_case ( self : Union[str, Any] ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __snake_case ( self : Union[str, Any] ) -> int: __snake_case : Optional[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __snake_case : Optional[Any] = tokenizer.vocab_size __snake_case : List[Any] = len(lowerCamelCase ) self.assertNotEqual(lowerCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __snake_case : List[Any] = ["aaaaa bbbbbb", "cccccccccdddddddd"] __snake_case : int = tokenizer.add_tokens(lowerCamelCase ) __snake_case : Optional[int] = tokenizer.vocab_size __snake_case : List[Any] = len(lowerCamelCase ) self.assertNotEqual(lowerCamelCase , 0 ) self.assertEqual(lowerCamelCase , lowerCamelCase ) self.assertEqual(lowerCamelCase , len(lowerCamelCase ) ) self.assertEqual(lowerCamelCase , all_size + len(lowerCamelCase ) ) __snake_case : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowerCamelCase ) self.assertGreaterEqual(len(lowerCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __snake_case : Union[str, Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} __snake_case : Optional[int] = tokenizer.add_special_tokens(lowerCamelCase ) __snake_case : Dict = tokenizer.vocab_size __snake_case : Optional[int] = len(lowerCamelCase ) self.assertNotEqual(lowerCamelCase , 0 ) self.assertEqual(lowerCamelCase , lowerCamelCase ) self.assertEqual(lowerCamelCase , len(lowerCamelCase ) ) self.assertEqual(lowerCamelCase , all_size_a + len(lowerCamelCase ) ) __snake_case : Optional[int] = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowerCamelCase ) self.assertGreaterEqual(len(lowerCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __snake_case ( self : Optional[int] ) -> Optional[int]: pass def __snake_case ( self : Union[str, Any] ) -> Optional[Any]: pass def __snake_case ( self : List[Any] ) -> Optional[Any]: __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Union[str, Any] = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(lowerCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) __snake_case : int = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) __snake_case : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase ) # fmt: off self.assertListEqual(lowerCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def __snake_case ( self : Dict ) -> Dict: # Use custom sequence because this tokenizer does not handle numbers. __snake_case : str = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off __snake_case : List[Any] = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowerCamelCase , )
81
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
681
0
"""simple docstring""" from numpy import exp, pi, sqrt def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
82
from __future__ import annotations def _a ( lowerCamelCase ): lowerCamelCase : Union[str, Any] = str(lowerCamelCase ) return n == n[::-1] def _a ( lowerCamelCase = 100_0000 ): lowerCamelCase : Any = 0 for i in range(1, lowerCamelCase ): if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
681
0
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def snake_case_ ( A_ : List[Any] ): '''simple docstring''' if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(A_, '''_dynamo''' ): return False return isinstance(A_, torch._dynamo.eval_frame.OptimizedModule ) def snake_case_ ( A_ : List[str], A_ : bool = True ): '''simple docstring''' _lowerCamelCase : Tuple = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _lowerCamelCase : Any = is_compiled_module(A_ ) if is_compiled: _lowerCamelCase : Dict = model _lowerCamelCase : Any = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(A_, A_ ): _lowerCamelCase : List[Any] = model.module if not keep_fpaa_wrapper: _lowerCamelCase : int = getattr(A_, '''forward''' ) _lowerCamelCase : int = model.__dict__.pop('''_original_forward''', A_ ) if original_forward is not None: while hasattr(A_, '''__wrapped__''' ): _lowerCamelCase : Union[str, Any] = forward.__wrapped__ if forward == original_forward: break _lowerCamelCase : int = forward if getattr(A_, '''_converted_to_transformer_engine''', A_ ): convert_model(A_, to_transformer_engine=A_ ) if is_compiled: _lowerCamelCase : Dict = model _lowerCamelCase : int = compiled_model return model def snake_case_ ( ): '''simple docstring''' PartialState().wait_for_everyone() def snake_case_ ( A_ : Optional[int], A_ : str ): '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(A_, A_ ) elif PartialState().local_process_index == 0: torch.save(A_, A_ ) @contextmanager def snake_case_ ( **A_ : Any ): '''simple docstring''' for key, value in kwargs.items(): _lowerCamelCase : Union[str, Any] = str(A_ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def snake_case_ ( A_ : Any ): '''simple docstring''' if not hasattr(A_, '''__qualname__''' ) and not hasattr(A_, '''__name__''' ): _lowerCamelCase : Union[str, Any] = getattr(A_, '''__class__''', A_ ) if hasattr(A_, '''__qualname__''' ): return obj.__qualname__ if hasattr(A_, '''__name__''' ): return obj.__name__ return str(A_ ) def snake_case_ ( A_ : Any, A_ : Dict ): '''simple docstring''' for key, value in source.items(): if isinstance(A_, A_ ): _lowerCamelCase : Optional[Any] = destination.setdefault(A_, {} ) merge_dicts(A_, A_ ) else: _lowerCamelCase : Any = value return destination def snake_case_ ( A_ : int = None ): '''simple docstring''' if port is None: _lowerCamelCase : Optional[int] = 2_95_00 with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
83
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
681
0
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class A_ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=64 , snake_case=5 , snake_case=4 , snake_case=64 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_input_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_labels lowercase = num_choices lowercase = scope def SCREAMING_SNAKE_CASE__ ( self ): return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_input_mask: lowercase = random_attention_mask([self.batch_size, self.seq_length] ) lowercase = None lowercase = None lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase = ids_tensor([self.batch_size] , self.num_choices ) lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = MPNetModel(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , snake_case ) lowercase = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = MPNetForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model( snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = MPNetForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_choices lowercase = MPNetForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase = model( snake_case , attention_mask=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): lowercase = self.num_labels lowercase = MPNetForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() lowercase = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) = config_and_inputs lowercase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Optional[Any] = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _UpperCamelCase : Optional[Any] = ( { """feature-extraction""": MPNetModel, """fill-mask""": MPNetForMaskedLM, """question-answering""": MPNetForQuestionAnswering, """text-classification""": MPNetForSequenceClassification, """token-classification""": MPNetForTokenClassification, """zero-shot""": MPNetForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Tuple = False _UpperCamelCase : List[str] = True def SCREAMING_SNAKE_CASE__ ( self ): lowercase = MPNetModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case ) @require_torch class A_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): lowercase = MPNetModel.from_pretrained('microsoft/mpnet-base' ) lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase = model(snake_case )[0] lowercase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , snake_case ) lowercase = torch.tensor( [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1E-4 ) )
84
def _a ( lowerCamelCase ): if num < 0: return False lowerCamelCase : int = num lowerCamelCase : int = 0 while num > 0: lowerCamelCase : str = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
681
0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
85
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
681
0
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" if "img_encoder.pos_embed" in name: A_ = name.replace("img_encoder.pos_embed" ,"vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: A_ = name.replace("img_encoder.patch_embed.proj" ,"vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: A_ = name.replace("img_encoder.patch_embed.norm" ,"vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: A_ = name.replace("img_encoder.layers" ,"vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: A_ = name.replace("blocks" ,"layers" ) if "attn" in name and "pre_assign" not in name: A_ = name.replace("attn" ,"self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: A_ = name.replace("proj" ,"out_proj" ) if "pre_assign_attn.attn.proj" in name: A_ = name.replace("pre_assign_attn.attn.proj" ,"pre_assign_attn.attn.out_proj" ) if "norm1" in name: A_ = name.replace("norm1" ,"layer_norm1" ) if "norm2" in name and "pre_assign" not in name: A_ = name.replace("norm2" ,"layer_norm2" ) if "img_encoder.norm" in name: A_ = name.replace("img_encoder.norm" ,"vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: A_ = name.replace("text_encoder.token_embedding" ,"text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: A_ = name.replace("text_encoder.positional_embedding" ,"text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: A_ = name.replace("text_encoder.transformer.resblocks." ,"text_model.encoder.layers." ) if "ln_1" in name: A_ = name.replace("ln_1" ,"layer_norm1" ) if "ln_2" in name: A_ = name.replace("ln_2" ,"layer_norm2" ) if "c_fc" in name: A_ = name.replace("c_fc" ,"fc1" ) if "c_proj" in name: A_ = name.replace("c_proj" ,"fc2" ) if "text_encoder" in name: A_ = name.replace("text_encoder" ,"text_model" ) if "ln_final" in name: A_ = name.replace("ln_final" ,"final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: A_ = name.replace("img_projector.linear_hidden." ,"visual_projection." ) if "img_projector.linear_out." in name: A_ = name.replace("img_projector.linear_out." ,"visual_projection.3." ) if "text_projector.linear_hidden" in name: A_ = name.replace("text_projector.linear_hidden" ,"text_projection" ) if "text_projector.linear_out" in name: A_ = name.replace("text_projector.linear_out" ,"text_projection.3" ) return name def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): A_ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." ) A_ , A_ = int(key_split[2] ), int(key_split[4] ) A_ = config.vision_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[dim : dim * 2, :] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." ) A_ = int(key_split[3] ) A_ = config.text_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[ dim : dim * 2, : ] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] else: A_ = rename_key(__UpperCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): A_ = val.squeeze_() else: A_ = val return orig_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int]="groupvit-gcc-yfcc" ,__UpperCamelCase : List[Any]=False ): """simple docstring""" A_ = GroupViTConfig() A_ = GroupViTModel(__UpperCamelCase ).eval() A_ = torch.load(__UpperCamelCase ,map_location="cpu" )["model"] A_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0) # verify result A_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) A_ = prepare_img() A_ = processor(text=["a photo of a cat", "a photo of a dog"] ,images=__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors="pt" ) with torch.no_grad(): A_ = model(**__UpperCamelCase ) if model_name == "groupvit-gcc-yfcc": A_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": A_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image ,__UpperCamelCase ,atol=1E-3 ) processor.save_pretrained(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print("Successfully saved processor and model to" ,__UpperCamelCase ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(__UpperCamelCase ,organization="nielsr" ) model.push_to_hub(__UpperCamelCase ,organization="nielsr" ) if __name__ == "__main__": __a :Tuple = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) __a :int = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
86
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowerCamelCase : List[Any] = """platform""" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]: """simple docstring""" if attention_mask is None: A__ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: A__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: A__ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class UpperCamelCase_ : '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : List[str]=0.02 , ) ->str: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id A__ = initializer_range def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) A__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1) A__ = shift_tokens_right(UpperCAmelCase__ , 1 , 2) A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , ) A__ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]) ->Optional[int]: '''simple docstring''' A__ = 20 A__ = model_class_name(UpperCAmelCase__) A__ = model.encode(inputs_dict['''input_ids''']) A__ , A__ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) A__ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__) A__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''') A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''') A__ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , ) A__ = model.decode(UpperCAmelCase__ , UpperCAmelCase__) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]) ->str: '''simple docstring''' A__ = 20 A__ = model_class_name(UpperCAmelCase__) A__ = model.encode(inputs_dict['''input_ids''']) A__ , A__ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) A__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) A__ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__) A__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A__ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''') A__ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) A__ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__) A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""") @require_flax class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = 99 def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) A__ = input_ids.shape[0] A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ , A__ , A__ = self._get_config_and_data() A__ = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__) A__ = lm_model(input_ids=UpperCAmelCase__) A__ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) A__ = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__) A__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa) A__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa) A__ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__) A__ = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa) A__ = shift_tokens_right(UpperCAmelCase__ , 1 , 2) A__ = np.equal(UpperCAmelCase__ , 1).astype(np.floataa).sum() A__ = np.equal(UpperCAmelCase__ , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(UpperCAmelCase__ , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 2).all()) @require_flax class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) UpperCAmelCase__ = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' A__ = FlaxBlenderbotSmallModelTester(self) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) A__ = model_class(UpperCAmelCase__) @jax.jit def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple=None , **UpperCAmelCase__ : Tuple): return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__) with self.subTest('''JIT Enabled'''): A__ = encode_jitted(**UpperCAmelCase__).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): A__ = encode_jitted(**UpperCAmelCase__).to_tuple() self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__)) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertEqual(jitted_output.shape , output.shape) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): A__ = model_class(UpperCAmelCase__) A__ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask''']) A__ = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Any): return model.decode( decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , ) with self.subTest('''JIT Enabled'''): A__ = decode_jitted(**UpperCAmelCase__).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): A__ = decode_jitted(**UpperCAmelCase__).to_tuple() self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__)) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertEqual(jitted_output.shape , output.shape) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''') # FlaxBlenderbotForSequenceClassification expects eos token in input_ids A__ = np.ones((1, 1)) * model.config.eos_token_id A__ = model(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__)
87
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") UpperCAmelCase = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: UpperCAmelCase = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) UpperCAmelCase = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
88
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
681
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _lowerCamelCase( _a ): def __init__( self, *lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[Any]: """simple docstring""" super().__init__(*lowerCamelCase, **lowerCamelCase) _lowercase : Any = eval_examples _lowercase : List[Any] = post_process_function def UpperCamelCase ( self, lowerCamelCase = None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase = "eval", **lowerCamelCase, ) -> Dict[str, float]: """simple docstring""" _lowercase : Optional[Any] = gen_kwargs.copy() _lowercase : List[Any] = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) _lowercase : Any = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) _lowercase : Optional[Any] = gen_kwargs _lowercase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset _lowercase : Optional[int] = self.get_eval_dataloader(lowerCamelCase) _lowercase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _lowercase : List[Any] = self.compute_metrics _lowercase : int = None _lowercase : Optional[Any] = time.time() _lowercase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowercase : int = eval_loop( lowerCamelCase, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: _lowercase : Union[str, Any] = compute_metrics _lowercase : Optional[int] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase) _lowercase : List[Any] = self.compute_metrics(lowerCamelCase) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'''{metric_key_prefix}_'''): _lowercase : Optional[int] = metrics.pop(lowerCamelCase) metrics.update(output.metrics) else: _lowercase : Dict = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) _lowercase : List[str] = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase) return metrics def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase = "test", **lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : str = gen_kwargs.copy() _lowercase : str = self.get_test_dataloader(lowerCamelCase) # Temporarily disable metric computation, we will do it in the loop here. _lowercase : List[str] = self.compute_metrics _lowercase : Any = None _lowercase : str = time.time() _lowercase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _lowercase : str = eval_loop( lowerCamelCase, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, ) finally: _lowercase : int = compute_metrics _lowercase : int = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), )) if self.post_process_function is None or self.compute_metrics is None: return output _lowercase : Dict = self.post_process_function(lowerCamelCase, lowerCamelCase, lowerCamelCase, 'predict') _lowercase : Optional[Any] = self.compute_metrics(lowerCamelCase) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'''{metric_key_prefix}_'''): _lowercase : Optional[Any] = metrics.pop(lowerCamelCase) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase)
89
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): # Initialise PyTorch model lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
681
0
'''simple docstring''' __UpperCAmelCase = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
90
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
681
0
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] , ): A = { '7z': (seven_zip_file, SevenZipExtractor), 'bz2': (bza_file, BzipaExtractor), 'gzip': (gz_file, GzipExtractor), 'lz4': (lza_file, LzaExtractor), 'tar': (tar_file, TarExtractor), 'xz': (xz_file, XzExtractor), 'zip': (zip_file, ZipExtractor), 'zstd': (zstd_file, ZstdExtractor), } A , A = input_paths_and_base_extractors[compression_format] if input_path is None: A = F'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) assert base_extractor.is_extractable(snake_case__ ) A = tmp_path / ('extracted' if is_archive else 'extracted.txt') base_extractor.extract(snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding='utf-8' ) else: A = output_path.read_text(encoding='utf-8' ) A = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( 'compression_format, is_archive' , [ ('7z', True), ('bz2', False), ('gzip', False), ('lz4', False), ('tar', True), ('xz', False), ('zip', True), ('zstd', False), ] , ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : List[Any] , ): A = { '7z': seven_zip_file, 'bz2': bza_file, 'gzip': gz_file, 'lz4': lza_file, 'tar': tar_file, 'xz': xz_file, 'zip': zip_file, 'zstd': zstd_file, } A = input_paths[compression_format] if input_path is None: A = F'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case__ ) A = Extractor.infer_extractor_format(snake_case__ ) assert extractor_format is not None A = tmp_path / ('extracted' if is_archive else 'extracted.txt') Extractor.extract(snake_case__ , snake_case__ , snake_case__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name A = file_path.read_text(encoding='utf-8' ) else: A = output_path.read_text(encoding='utf-8' ) A = text_file.read_text(encoding='utf-8' ) assert extracted_file_content == expected_file_content @pytest.fixture def _snake_case ( snake_case__ : List[str] , snake_case__ : Dict ): import tarfile A = tmp_path / 'data_dot_dot' directory.mkdir() A = directory / 'tar_file_with_dot_dot.tar' with tarfile.TarFile(snake_case__ , 'w' ) as f: f.add(snake_case__ , arcname=os.path.join('..' , text_file.name ) ) return path @pytest.fixture def _snake_case ( snake_case__ : List[str] ): import tarfile A = tmp_path / 'data_sym_link' directory.mkdir() A = directory / 'tar_file_with_sym_link.tar' os.symlink('..' , directory / 'subdir' , target_is_directory=snake_case__ ) with tarfile.TarFile(snake_case__ , 'w' ) as f: f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( 'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , ) def _snake_case ( snake_case__ : str , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : str ): A = { 'tar_file_with_dot_dot': tar_file_with_dot_dot, 'tar_file_with_sym_link': tar_file_with_sym_link, } A = insecure_tar_files[insecure_tar_file] A = tmp_path / 'extracted' TarExtractor.extract(snake_case__ , snake_case__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _snake_case ( snake_case__ : Dict ): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number A = tmpdir / 'not_a_zip_file' # From: https://github.com/python/cpython/pull/5053 A = ( B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00' B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I' B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07' B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82' ) with not_a_zip_file.open('wb' ) as f: f.write(snake_case__ ) assert zipfile.is_zipfile(str(snake_case__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case__ ) # but we're right
91
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
681
0
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger("""transformers.models.speecht5""") UpperCamelCase_ = { """speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""", """speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""", """speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""", """speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""", } UpperCamelCase_ = { """text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""", """text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""", } UpperCamelCase_ = { """speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""", """speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""", """speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""", """speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""", """speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""", } UpperCamelCase_ = { """speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""", """speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""", """speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""", """speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""", """speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""", """speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""", """speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""", """speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""", """speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""", """speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""", """speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""", """speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""", } UpperCamelCase_ = { """text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""", } UpperCamelCase_ = { """text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""", } UpperCamelCase_ = { """encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""", """encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""", """encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""", """encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""", """encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""", """encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""", """encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""", """encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""", """encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""", } UpperCamelCase_ = { """decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""", """decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""", """decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""", """decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""", """decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""", """decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""", """decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""", """decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""", """decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""", """decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""", """decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""", """decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""", """decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""", } UpperCamelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase_ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase_ = [] UpperCamelCase_ = [ """encoder.version""", """encoder.layers.*.norm_k.weight""", """encoder.layers.*.norm_k.bias""", """decoder.version""", """decoder.layers.*.norm_k.weight""", """decoder.layers.*.norm_k.bias""", """decoder.pos_emb.pe_k""", """speech_encoder_prenet.embed_positions._float_tensor""", """text_decoder_prenet.embed_positions._float_tensor""", ] UpperCamelCase_ = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """speech_decoder_prenet.*""", """speech_decoder_postnet.*""", ] UpperCamelCase_ = IGNORE_KEYS + [ """encoder.proj""", """speech_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] UpperCamelCase_ = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]: for attribute in key.split('''.''' ): lowercase : Any =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Any =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : int =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Optional[Any] =value elif weight_type == "weight_g": lowercase : List[str] =value elif weight_type == "weight_v": lowercase : Any =value elif weight_type == "bias": lowercase : Optional[int] =value elif weight_type == "running_mean": lowercase : List[str] =value elif weight_type == "running_var": lowercase : List[str] =value elif weight_type == "num_batches_tracked": lowercase : int =value else: lowercase : List[Any] =value logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] ) -> Dict: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowercase , lowercase : int =key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[Any]: lowercase : str =[] if task == "s2t": lowercase : List[Any] =hf_model.speechta.encoder.prenet.feature_encoder lowercase : Optional[int] =MAPPING_S2T lowercase : Optional[Any] =IGNORE_KEYS_S2T elif task == "t2s": lowercase : Optional[Any] =None lowercase : Union[str, Any] =MAPPING_T2S lowercase : List[Any] =IGNORE_KEYS_T2S elif task == "s2s": lowercase : Union[str, Any] =hf_model.speechta.encoder.prenet.feature_encoder lowercase : str =MAPPING_S2S lowercase : List[str] =IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__magic_name__ , __magic_name__ ): logger.info(f'''{name} was ignored''' ) continue lowercase : str =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : Optional[Any] =True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: lowercase , lowercase : Optional[int] =key.split('''.*.''' ) if prefix in name and suffix in name: lowercase : List[str] =suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: lowercase : Dict =True if "*" in mapped_key: lowercase : Optional[Any] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : str =mapped_key.replace('''*''' , __magic_name__ ) if "weight_g" in name: lowercase : int ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Tuple ='''bias''' elif "weight" in name: lowercase : Dict ='''weight''' elif "running_mean" in name: lowercase : Any ='''running_mean''' elif "running_var" in name: lowercase : List[str] ='''running_var''' elif "num_batches_tracked" in name: lowercase : str ='''num_batches_tracked''' else: lowercase : Optional[int] =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Optional[int]: lowercase : Tuple =full_name.split('''conv_layers.''' )[-1] lowercase : Dict =name.split('''.''' ) lowercase : Union[str, Any] =int(items[0] ) lowercase : int =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Any =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : List[str] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : Any =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=None , ) -> Optional[int]: if config_path is not None: lowercase : int =SpeechTaConfig.from_pretrained(__magic_name__ ) else: lowercase : str =SpeechTaConfig() if task == "s2t": lowercase : List[str] =config.max_text_positions lowercase : Union[str, Any] =SpeechTaForSpeechToText(__magic_name__ ) elif task == "t2s": lowercase : Optional[int] =1876 lowercase : Any =600 lowercase : List[str] =config.max_speech_positions lowercase : List[str] =SpeechTaForTextToSpeech(__magic_name__ ) elif task == "s2s": lowercase : Tuple =1876 lowercase : Optional[int] =config.max_speech_positions lowercase : List[Any] =SpeechTaForSpeechToSpeech(__magic_name__ ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: lowercase : Any =SpeechTaTokenizer(__magic_name__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it lowercase : str =AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) lowercase : Tuple =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) lowercase : Tuple =SpeechTaFeatureExtractor() lowercase : Union[str, Any] =SpeechTaProcessor(tokenizer=__magic_name__ , feature_extractor=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : List[str] =torch.load(__magic_name__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , __magic_name__ , __magic_name__ ) model.save_pretrained(__magic_name__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__magic_name__ ) model.push_to_hub(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument( """--task""", default="""s2t""", type=str, help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) UpperCamelCase_ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
92
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[str] = k_size // 2 lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) ) return g def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase : Dict = height - k_size + 1 lowerCamelCase : str = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase : List[Any] = 0 for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ): lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase ) lowerCamelCase : str = ravel(lowerCamelCase ) # reshape and get the dst image lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase ) return dst if __name__ == "__main__": # read original image _lowerCamelCase =imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _lowerCamelCase =gaussian_filter(gray, 3, sigma=1) _lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
681
0
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :Union[List[PIL.Image.Image], np.ndarray] __magic_name__ :Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
93
import pytest _lowerCamelCase ="""__dummy_dataset1__""" _lowerCamelCase =""" import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Union[str, Any] = dataset_loading_script_name lowerCamelCase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowerCamelCase : str = script_dir / F'''{script_name}.py''' with open(lowerCamelCase, """w""" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
681
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : str ) -> Dict: '''simple docstring''' lowercase : Any =dataset lowercase : Union[str, Any] =process lowercase : str =params def __len__( self : Any ) -> Dict: '''simple docstring''' return len(self.dataset ) def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> List[str]: '''simple docstring''' lowercase : Dict =self.dataset[i] lowercase : Optional[Any] =self.process(UpperCAmelCase , **self.params ) return processed class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int]=None ) -> List[Any]: '''simple docstring''' lowercase : Dict =loader lowercase : List[Any] =infer lowercase : Tuple =params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowercase : Any =None lowercase : int =loader_batch_size # Internal bookkeeping lowercase : Tuple =None lowercase : Tuple =None def __len__( self : List[Any] ) -> Dict: '''simple docstring''' return len(self.loader ) def __iter__( self : str ) -> List[str]: '''simple docstring''' lowercase : str =iter(self.loader ) return self def A__ ( self : str ) -> Union[str, Any]: '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowercase : int =self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowercase : str ={} for k, element in self._loader_batch_data.items(): if isinstance(UpperCAmelCase , UpperCAmelCase ): # Convert ModelOutput to tuple first lowercase : str =element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowercase : Any =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowercase : Optional[int] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase , UpperCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowercase : Optional[int] =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowercase : Optional[Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowercase : Any =None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase : Optional[Any] =element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowercase : Any =np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. lowercase : List[Any] =element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowercase : Dict =self._loader_batch_data.__class__(UpperCAmelCase ) self._loader_batch_index += 1 return result def A__ ( self : Tuple ) -> Dict: '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowercase : Any =next(self.iterator ) lowercase : Optional[int] =self.infer(UpperCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCAmelCase , torch.Tensor ): lowercase : Optional[Any] =processed else: lowercase : Optional[int] =list(processed.keys() )[0] lowercase : Any =processed[key] if isinstance(UpperCAmelCase , UpperCAmelCase ): lowercase : Optional[int] =len(UpperCAmelCase ) else: lowercase : Dict =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase : List[str] =observed_batch_size # Setting internal index to unwrap the batch lowercase : Dict =processed lowercase : Any =0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : str=None ) -> Optional[Any]: '''simple docstring''' super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __iter__( self : Optional[int] ) -> str: '''simple docstring''' lowercase : int =iter(self.loader ) lowercase : str =None return self def A__ ( self : Dict ) -> int: '''simple docstring''' if self.subiterator is None: lowercase : Optional[Any] =self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowercase : int =next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowercase : Optional[int] =self.infer(next(self.iterator ) , **self.params ) lowercase : List[str] =next(self.subiterator ) return processed class UpperCAmelCase_ ( __A ): """simple docstring""" def __iter__( self : Optional[int] ) -> List[str]: '''simple docstring''' lowercase : str =iter(self.loader ) return self def A__ ( self : int ) -> Tuple: '''simple docstring''' lowercase : Union[str, Any] =False lowercase : List[str] =[] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowercase : Dict =self.loader_batch_item() lowercase : int =item.pop('''is_last''' ) accumulator.append(UpperCAmelCase ) if is_last: return accumulator while not is_last: lowercase : List[Any] =self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(UpperCAmelCase , torch.Tensor ): lowercase : Optional[int] =processed else: lowercase : int =list(processed.keys() )[0] lowercase : int =processed[key] if isinstance(UpperCAmelCase , UpperCAmelCase ): lowercase : Union[str, Any] =len(UpperCAmelCase ) else: lowercase : List[Any] =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowercase : List[Any] =observed_batch_size lowercase : Any =processed lowercase : List[Any] =0 while self._loader_batch_index < self.loader_batch_size: lowercase : Optional[int] =self.loader_batch_item() lowercase : str =item.pop('''is_last''' ) accumulator.append(UpperCAmelCase ) if is_last: return accumulator else: lowercase : int =processed lowercase : int =item.pop('''is_last''' ) accumulator.append(UpperCAmelCase ) return accumulator class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : Dataset , UpperCAmelCase : str ) -> Dict: '''simple docstring''' lowercase : Union[str, Any] =dataset lowercase : Dict =key def __len__( self : Dict ) -> Optional[Any]: '''simple docstring''' return len(self.dataset ) def __getitem__( self : List[str] , UpperCAmelCase : Optional[int] ) -> Optional[int]: '''simple docstring''' return self.dataset[i][self.key] class UpperCAmelCase_ ( __A ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Dataset , UpperCAmelCase : str , UpperCAmelCase : str ) -> int: '''simple docstring''' lowercase : int =dataset lowercase : Union[str, Any] =keya lowercase : Any =keya def __len__( self : Union[str, Any] ) -> Any: '''simple docstring''' return len(self.dataset ) def __getitem__( self : Dict , UpperCAmelCase : Tuple ) -> int: '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
94
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): _lowerCamelCase ={ """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: _lowerCamelCase ={ """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _a ( lowerCamelCase ): lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 ) lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase : Any = numpy_to_pil(lowerCamelCase ) return images def _a ( lowerCamelCase ): if images.ndim == 3: lowerCamelCase : Optional[Any] = images[None, ...] lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images] else: lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
681
0
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCamelCase_ = NewType('''DataClass''', Any) lowerCamelCase_ = NewType('''DataClassType''', Any) def snake_case ( A__ ): if isinstance(A__ ,A__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def snake_case ( A__ ): UpperCAmelCase_ : Optional[Any] = {str(A__ ): choice for choice in choices} return lambda A__ : str_to_choice.get(A__ ,A__ ) def snake_case ( *, A__ = None ,A__ = None ,A__ = dataclasses.MISSING ,A__ = dataclasses.MISSING ,A__ = None ,**A__ ,): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls UpperCAmelCase_ : int = {} if aliases is not None: UpperCAmelCase_ : List[str] = aliases if help is not None: UpperCAmelCase_ : Optional[int] = help return dataclasses.field(metadata=A__ ,default=A__ ,default_factory=A__ ,**A__ ) class UpperCamelCase_ (__A ): __magic_name__ = 42 def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **lowerCAmelCase_ : str ) -> Optional[Any]: # To make the default appear when using --help if "formatter_class" not in kwargs: UpperCAmelCase_ : Tuple = ArgumentDefaultsHelpFormatter super().__init__(**lowerCAmelCase_ ) if dataclasses.is_dataclass(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = [dataclass_types] UpperCAmelCase_ : int = list(lowerCAmelCase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowerCAmelCase_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : ArgumentParser , lowerCAmelCase_ : dataclasses.Field ) -> Tuple: UpperCAmelCase_ : Any = f"""--{field.name}""" UpperCAmelCase_ : Any = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , lowerCAmelCase_ ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("aliases" , [] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Any = [aliases] UpperCAmelCase_ : Tuple = getattr(field.type , "__origin__" , field.type ) if origin_type is Union or (hasattr(lowerCAmelCase_ , "UnionType" ) and isinstance(lowerCAmelCase_ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowerCAmelCase_ ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." f""" Problem encountered in field '{field.name}'.""" ) if type(lowerCAmelCase_ ) not in field.type.__args__: # filter `str` in Union UpperCAmelCase_ : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] UpperCAmelCase_ : int = getattr(field.type , "__origin__" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) UpperCAmelCase_ : Any = ( field.type.__args__[0] if isinstance(lowerCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1] ) UpperCAmelCase_ : Optional[Any] = getattr(field.type , "__origin__" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) UpperCAmelCase_ : List[Any] = {} if origin_type is Literal or (isinstance(field.type , lowerCAmelCase_ ) and issubclass(field.type , lowerCAmelCase_ )): if origin_type is Literal: UpperCAmelCase_ : str = field.type.__args__ else: UpperCAmelCase_ : List[Any] = [x.value for x in field.type] UpperCAmelCase_ : Optional[int] = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: UpperCAmelCase_ : Dict = field.default else: UpperCAmelCase_ : str = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument UpperCAmelCase_ : Any = copy(lowerCAmelCase_ ) # Hack because type=bool in argparse does not behave as we want. UpperCAmelCase_ : Any = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. UpperCAmelCase_ : Optional[int] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way UpperCAmelCase_ : int = default # This tells argparse we accept 0 or 1 value after --field_name UpperCAmelCase_ : Union[str, Any] = "?" # This is the value that will get picked if we do --field_name (without value) UpperCAmelCase_ : List[Any] = True elif isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = field.type.__args__[0] UpperCAmelCase_ : int = "+" if field.default_factory is not dataclasses.MISSING: UpperCAmelCase_ : Union[str, Any] = field.default_factory() elif field.default is dataclasses.MISSING: UpperCAmelCase_ : List[Any] = True else: UpperCAmelCase_ : Tuple = field.type if field.default is not dataclasses.MISSING: UpperCAmelCase_ : Any = field.default elif field.default_factory is not dataclasses.MISSING: UpperCAmelCase_ : str = field.default_factory() else: UpperCAmelCase_ : Union[str, Any] = True parser.add_argument(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): UpperCAmelCase_ : Union[str, Any] = False parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : DataClassType ) -> int: if hasattr(lowerCAmelCase_ , "_argument_group_name" ): UpperCAmelCase_ : Union[str, Any] = self.add_argument_group(dtype._argument_group_name ) else: UpperCAmelCase_ : Tuple = self try: UpperCAmelCase_ : Dict[str, type] = get_type_hints(lowerCAmelCase_ ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase_ ): UpperCAmelCase_ : Union[str, Any] = ".".join(map(lowerCAmelCase_ , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(lowerCAmelCase_ ): if not field.init: continue UpperCAmelCase_ : Optional[Any] = type_hints[field.name] self._parse_dataclass_field(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): UpperCAmelCase_ : Dict = [] if args_filename: args_files.append(Path(lowerCAmelCase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values UpperCAmelCase_ : int = ArgumentParser() args_file_parser.add_argument(lowerCAmelCase_ , type=lowerCAmelCase_ , action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = args_file_parser.parse_known_args(args=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = vars(lowerCAmelCase_ ).get(args_file_flag.lstrip("-" ) , lowerCAmelCase_ ) if cmd_args_file_paths: args_files.extend([Path(lowerCAmelCase_ ) for p in cmd_args_file_paths] ) UpperCAmelCase_ : Dict = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last UpperCAmelCase_ : Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:] UpperCAmelCase_ , UpperCAmelCase_ : Any = self.parse_known_args(args=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [] for dtype in self.dataclass_types: UpperCAmelCase_ : Union[str, Any] = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init} UpperCAmelCase_ : Optional[int] = {k: v for k, v in vars(lowerCAmelCase_ ).items() if k in keys} for k in keys: delattr(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = dtype(**lowerCAmelCase_ ) outputs.append(lowerCAmelCase_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(lowerCAmelCase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict[str, Any] , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: UpperCAmelCase_ : Dict = set(args.keys() ) UpperCAmelCase_ : Dict = [] for dtype in self.dataclass_types: UpperCAmelCase_ : Dict = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init} UpperCAmelCase_ : Dict = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) UpperCAmelCase_ : List[str] = dtype(**lowerCAmelCase_ ) outputs.append(lowerCAmelCase_ ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase_ )}""" ) return tuple(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: with open(Path(lowerCAmelCase_ ) , encoding="utf-8" ) as open_json_file: UpperCAmelCase_ : List[Any] = json.loads(open_json_file.read() ) UpperCAmelCase_ : Any = self.parse_dict(lowerCAmelCase_ , allow_extra_keys=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ) -> Tuple[DataClass, ...]: UpperCAmelCase_ : Optional[int] = self.parse_dict(yaml.safe_load(Path(lowerCAmelCase_ ).read_text() ) , allow_extra_keys=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
95
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
681
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase = 16 __lowerCamelCase = 32 def a ( __UpperCAmelCase : Accelerator , __UpperCAmelCase : int = 1_6 ) -> int: __magic_name__: Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __magic_name__: Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCAmelCase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__: Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __magic_name__: int = datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__: Tuple = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCAmelCase : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __magic_name__: str = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __magic_name__: Union[str, Any] = 1_6 elif accelerator.mixed_precision != "no": __magic_name__: List[str] = 8 else: __magic_name__: int = None return tokenizer.pad( __UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. __magic_name__: List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) __magic_name__: str = DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCamelCase = mocked_dataloaders # noqa: F811 def a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ) -> Optional[int]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1": __magic_name__: int = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __magic_name__: Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: __magic_name__: Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__: str = config["""lr"""] __magic_name__: Union[str, Any] = int(config["""num_epochs"""] ) __magic_name__: List[str] = int(config["""seed"""] ) __magic_name__: Union[str, Any] = int(config["""batch_size"""] ) set_seed(__UpperCAmelCase ) __magic_name__, __magic_name__: Tuple = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase ) __magic_name__: str = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation __magic_name__: Optional[int] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __magic_name__: Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE __magic_name__: Dict = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__: Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __magic_name__: Dict = model.to(accelerator.device ) # Instantiate optimizer __magic_name__: List[str] = AdamW(params=model.parameters() , lr=__UpperCAmelCase ) # Instantiate scheduler __magic_name__: Optional[Any] = get_linear_schedule_with_warmup( optimizer=__UpperCAmelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__: List[str] = accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: __magic_name__: List[str] = os.path.split(__UpperCAmelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__UpperCAmelCase , __UpperCAmelCase ) # Now we train the model for epoch in range(__UpperCAmelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __magic_name__: int = 0 for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __magic_name__: Any = model(**__UpperCAmelCase ) __magic_name__: int = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __magic_name__: Tuple = loss / gradient_accumulation_steps accelerator.backward(__UpperCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __magic_name__: int = model(**__UpperCAmelCase ) __magic_name__: int = outputs.logits.argmax(dim=-1 ) __magic_name__, __magic_name__: List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCAmelCase , references=__UpperCAmelCase , ) __magic_name__: Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , __UpperCAmelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { """accuracy""": eval_metric["""accuracy"""], """f1""": eval_metric["""f1"""], """train_loss""": total_loss.item() / len(__UpperCAmelCase ), """epoch""": epoch, } , step=__UpperCAmelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def a ( ) -> str: __magic_name__: Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__UpperCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) __magic_name__: Union[str, Any] = parser.parse_args() __magic_name__: int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": main()
96
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __a = logging.getLogger(__name__) torch.set_grad_enabled(False) __a = 'cuda' if torch.cuda.is_available() else 'cpu' def a ( snake_case__: str , snake_case__: Optional[int]=100 , snake_case__: Any=" " ): '''simple docstring''' lowercase_ = text.split(snake_case__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case__ ) , snake_case__ )] def a ( snake_case__: dict ): '''simple docstring''' lowercase_ , lowercase_ = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(snake_case__ ): titles.append(title if title is not None else '''''' ) texts.append(snake_case__ ) return {"title": titles, "text": texts} def a ( snake_case__: dict , snake_case__: DPRContextEncoder , snake_case__: DPRContextEncoderTokenizerFast ): '''simple docstring''' lowercase_ = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=snake_case__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase_ = ctx_encoder(input_ids.to(device=snake_case__ ) , return_dict=snake_case__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def a ( snake_case__: "RagExampleArguments" , snake_case__: "ProcessingArguments" , snake_case__: "IndexHnswArguments" , ): '''simple docstring''' ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase_ = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase_ = dataset.map(snake_case__ , batched=snake_case__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case__ ) lowercase_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase_ = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase_ = dataset.map( partial(snake_case__ , ctx_encoder=snake_case__ , ctx_tokenizer=snake_case__ ) , batched=snake_case__ , batch_size=processing_args.batch_size , features=snake_case__ , ) # And finally save your dataset lowercase_ = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(snake_case__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=snake_case__ ) # And save the index lowercase_ = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(snake_case__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowercase__: """simple docstring""" a :str = field( default=str(Path(UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) a :Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) a :str = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) a :str = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) a :Optional[str] = field( default=str(Path(UpperCAmelCase ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class lowercase__: """simple docstring""" a :Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) a :int = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class lowercase__: """simple docstring""" a :int = field( default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) a :int = field( default=128 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __a = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __a , __a , __a = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __a = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
97
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCamelCase =HfArgumentParser(InitializationArguments) _lowerCamelCase =parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCamelCase ={ """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) _lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCamelCase =AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
681
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : Optional[int] = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowercase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , """sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
from math import factorial SCREAMING_SNAKE_CASE = {str(digit): factorial(digit) for digit in range(1_0)} def a (lowerCAmelCase__ ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(lowerCAmelCase__ ) ) def a (lowerCAmelCase__ = 60 , lowerCAmelCase__ = 1_000_000 ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length __a = 0 # the cached sizes of the previous chains __a = {} for start_chain_element in range(1 , lowerCAmelCase__ ): # The temporary set will contain the elements of the chain __a = set() __a = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __a = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(lowerCAmelCase__ ) chain_set_length += 1 __a = digit_factorial_sum(lowerCAmelCase__ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __a = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'''{solution()}''')
99
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
681
0
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _A : str = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : List[str] = DebertaVaTokenizer lowerCamelCase__ : Union[str, Any] = DebertaVaTokenizerFast lowerCamelCase__ : List[Any] = True lowerCamelCase__ : List[Any] = True def lowercase_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''this is a test''' SCREAMING_SNAKE_CASE__ = '''this is a test''' return input_text, output_text def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''<pad>''' SCREAMING_SNAKE_CASE__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(A_ ) , 3_00_01 ) def lowercase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ''' \tHeLLo!how \n Are yoU? ''' SCREAMING_SNAKE_CASE__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , do_lower_case=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , do_lower_case=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def lowercase_ ( self ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def lowercase_ ( self ): '''simple docstring''' pass def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ''' \tHeLLo!how \n Are yoU? ''' SCREAMING_SNAKE_CASE__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''This is a test''' SCREAMING_SNAKE_CASE__ = [13, 1, 43_98, 25, 21, 12_89] SCREAMING_SNAKE_CASE__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] SCREAMING_SNAKE_CASE__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ , keep_accents=A_ ) SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast(A_ , keep_accents=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual(A_ , A_ ) # fmt: off SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.''' SCREAMING_SNAKE_CASE__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] SCREAMING_SNAKE_CASE__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] SCREAMING_SNAKE_CASE__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on SCREAMING_SNAKE_CASE__ = tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual(A_ , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer(A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.encode('''sequence builders''' ) SCREAMING_SNAKE_CASE__ = tokenizer.encode('''multi-sequence build''' ) SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(A_ ) SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , A_ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , A_ , ) @slow def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
100
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Optional[int] = """decision_transformer""" _UpperCAmelCase : str = ["""past_key_values"""] _UpperCAmelCase : Any = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ): lowerCamelCase : Optional[int] = state_dim lowerCamelCase : int = act_dim lowerCamelCase : int = hidden_size lowerCamelCase : Union[str, Any] = max_ep_len lowerCamelCase : Optional[int] = action_tanh lowerCamelCase : Any = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : List[Any] = n_layer lowerCamelCase : Dict = n_head lowerCamelCase : Optional[Any] = n_inner lowerCamelCase : Tuple = activation_function lowerCamelCase : Tuple = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Dict = attn_pdrop lowerCamelCase : Tuple = layer_norm_epsilon lowerCamelCase : Tuple = initializer_range lowerCamelCase : Tuple = scale_attn_weights lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCamelCase : List[str] = reorder_and_upcast_attn lowerCamelCase : Optional[Any] = bos_token_id lowerCamelCase : str = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
681
0
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
101
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
681
0
"""simple docstring""" import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE="29500" ): UpperCamelCase : Any = False UpperCamelCase : List[Any] = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): UpperCamelCase : List[str] = True elif "IPython" in sys.modules: UpperCamelCase : Any = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() ) try: UpperCamelCase : Union[str, Any] = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , SCREAMING_SNAKE_CASE ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: UpperCamelCase : List[str] = 8 UpperCamelCase : Any = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""TPU""" ) print(f"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*SCREAMING_SNAKE_CASE ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.01""" , master_port=SCREAMING_SNAKE_CASE , mixed_precision=SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[int] = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""MULTI_GPU""" ) print(f"""Launching training on {num_processes} GPUs.""" ) try: start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """ """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): UpperCamelCase : List[str] = """1""" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*SCREAMING_SNAKE_CASE ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=2 ): from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ): UpperCamelCase : Union[str, Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE , debug=SCREAMING_SNAKE_CASE ) start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
102
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
681
0
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
103
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
681
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""", """LiltForQuestionAnswering""", """LiltForSequenceClassification""", """LiltForTokenClassification""", """LiltModel""", """LiltPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
104
from __future__ import annotations def _a ( lowerCamelCase ): lowerCamelCase : Union[str, Any] = str(lowerCamelCase ) return n == n[::-1] def _a ( lowerCamelCase = 100_0000 ): lowerCamelCase : Any = 0 for i in range(1, lowerCamelCase ): if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
681
0
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : complex , lowerCamelCase_ : str = "x" , lowerCamelCase_ : float = 10**-10 , lowerCamelCase_ : int = 1 , ) -> complex: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = symbols(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = lambdify(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = lambdify(lowerCamelCase_ , diff(lowerCamelCase_ , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE_ : Dict = starting_point while True: if diff_function(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE_ : Optional[int] = prev_guess - multiplicity * func(lowerCamelCase_ ) / diff_function( lowerCamelCase_ ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess SCREAMING_SNAKE_CASE_ : str = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""") # Find value of e print( '''The root of log(y) - 1 = 0 is ''', F"""{newton_raphson("log(y) - 1", 2, variable="y")}""", ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', F"""{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
105
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
681
0
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __snake_case :Optional[Any] =logging.get_logger(__name__) class lowerCAmelCase__ ( _lowerCamelCase ): def __init__( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> None: warnings.warn( 'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use MobileViTImageProcessor instead.' , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
106
def _a ( lowerCamelCase ): if num < 0: return False lowerCamelCase : int = num lowerCamelCase : int = 0 while num > 0: lowerCamelCase : str = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
681
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets _UpperCAmelCase : Dict = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' _UpperCAmelCase : str = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' _UpperCAmelCase : str = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'], ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict=None, UpperCamelCase__ : Dict=1, UpperCamelCase__ : List[Any]="binary", UpperCamelCase__ : str=None ) -> Optional[int]: _A = fa_score( UpperCamelCase__, UpperCamelCase__, labels=UpperCamelCase__, pos_label=UpperCamelCase__, average=UpperCamelCase__, sample_weight=UpperCamelCase__ ) return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
107
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
681
0
__a: List[Any] = [0, 2, 4, 6, 8] __a: Union[str, Any] = [1, 3, 5, 7, 9] def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case ) -> int: if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 1_0 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 _UpperCAmelCase = 0 for digit in range(1_0 ): _UpperCAmelCase = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 1_0 , __snake_case , __snake_case ) return result _UpperCAmelCase = 0 for digita in range(1_0 ): _UpperCAmelCase = digita if (remainder + digita) % 2 == 0: _UpperCAmelCase = ODD_DIGITS else: _UpperCAmelCase = EVEN_DIGITS for digita in other_parity_digits: _UpperCAmelCase = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 1_0 , __snake_case , __snake_case , ) return result def _SCREAMING_SNAKE_CASE ( __snake_case = 9 ) -> int: _UpperCAmelCase = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__snake_case , 0 , [0] * length , __snake_case ) return result if __name__ == "__main__": print(F"{solution() = }")
108
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : List[str] = 'open-llama' def __init__( self : List[Any] ,lowerCamelCase : Optional[Any]=10_0000 ,lowerCamelCase : Optional[Any]=4096 ,lowerCamelCase : Tuple=1_1008 ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Tuple=32 ,lowerCamelCase : int="silu" ,lowerCamelCase : Dict=2048 ,lowerCamelCase : Optional[int]=0.02 ,lowerCamelCase : Any=1E-6 ,lowerCamelCase : Dict=True ,lowerCamelCase : List[Any]=0 ,lowerCamelCase : Optional[Any]=1 ,lowerCamelCase : List[str]=2 ,lowerCamelCase : str=False ,lowerCamelCase : Dict=True ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : str=None ,**lowerCamelCase : Optional[Any] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = rms_norm_eps __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = kwargs.pop( """use_memorry_efficient_attention""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_dropout_prob __SCREAMING_SNAKE_CASE = use_stable_embedding __SCREAMING_SNAKE_CASE = shared_input_output_embedding __SCREAMING_SNAKE_CASE = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,tie_word_embeddings=lowerCamelCase ,**lowerCamelCase ,) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,lowerCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"""got {self.rope_scaling}""" ) __SCREAMING_SNAKE_CASE = self.rope_scaling.get("""type""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.rope_scaling.get("""factor""" ,lowerCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(lowerCamelCase ,lowerCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
109
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
'''simple docstring''' # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowercase__( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int]=0 )-> Any: """simple docstring""" if name is None: _UpperCamelCase = None else: _UpperCamelCase = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" _UpperCamelCase = fmt.format(_UpperCamelCase ) # Print and recurse (if needed). if isinstance(_UpperCamelCase , _UpperCamelCase ): if msg is not None: print(_UpperCamelCase ) for k in val.keys(): recursive_print(_UpperCamelCase , val[k] , spaces + 2 ) elif isinstance(_UpperCamelCase , torch.Tensor ): print(_UpperCamelCase , ":" , val.size() ) else: print(_UpperCamelCase , ":" , _UpperCamelCase ) def lowercase__( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] )-> List[str]: """simple docstring""" _UpperCamelCase = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] _UpperCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:] _UpperCamelCase = param.view(*_UpperCamelCase ) _UpperCamelCase = param.transpose(0 , 2 ) _UpperCamelCase = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] _UpperCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:] _UpperCamelCase = param.view(*_UpperCamelCase ) _UpperCamelCase = param.transpose(0 , 1 ).contiguous() _UpperCamelCase = param.view(*_UpperCamelCase ) return param def lowercase__( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple )-> List[str]: """simple docstring""" _UpperCamelCase = {} # old versions did not store training args _UpperCamelCase = input_state_dict.get("args" , _UpperCamelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) _UpperCamelCase = ds_args.padded_vocab_size _UpperCamelCase = ds_args.max_position_embeddings _UpperCamelCase = ds_args.hidden_size _UpperCamelCase = ds_args.num_layers _UpperCamelCase = ds_args.num_attention_heads _UpperCamelCase = ds_args.ffn_hidden_size # pprint(config) # The number of heads. _UpperCamelCase = config.n_head # The hidden_size per head. _UpperCamelCase = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): _UpperCamelCase = input_state_dict["""checkpoint_version"""] else: _UpperCamelCase = 0.0 # The model. _UpperCamelCase = input_state_dict["""model"""] # The language model. _UpperCamelCase = model["""language_model"""] # The embeddings. _UpperCamelCase = lm["""embedding"""] # The word embeddings. _UpperCamelCase = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. _UpperCamelCase = word_embeddings[: config.vocab_size, :] _UpperCamelCase = word_embeddings # The position embeddings. _UpperCamelCase = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] _UpperCamelCase = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match" ) # Store the position embeddings. _UpperCamelCase = pos_embeddings # The transformer. _UpperCamelCase = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. _UpperCamelCase = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" ) # The simple map of names for "automated" rules. _UpperCamelCase = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. _UpperCamelCase = layer_re.match(_UpperCamelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. _UpperCamelCase = int(m.group(1 ) ) # The name of the operation. _UpperCamelCase = m.group(2 ) # Is it a weight or a bias? _UpperCamelCase = m.group(3 ) # The name of the layer. _UpperCamelCase = f"transformer.h.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm" ): _UpperCamelCase = """ln_1""" if op_name.startswith("input" ) else """ln_2""" _UpperCamelCase = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. _UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _UpperCamelCase , _UpperCamelCase ) _UpperCamelCase = causal_mask # Insert a "dummy" tensor for masked_bias. _UpperCamelCase = torch.tensor(-1E4 , dtype=torch.floataa ) _UpperCamelCase = masked_bias _UpperCamelCase = fix_query_key_value_ordering(_UpperCamelCase , _UpperCamelCase , 3 , _UpperCamelCase , _UpperCamelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. _UpperCamelCase = out_val.transpose(0 , 1 ).contiguous() # Store. _UpperCamelCase = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": _UpperCamelCase = fix_query_key_value_ordering(_UpperCamelCase , _UpperCamelCase , 3 , _UpperCamelCase , _UpperCamelCase ) # Store. No change of shape. _UpperCamelCase = out_val # Transpose the weights. elif weight_or_bias == "weight": _UpperCamelCase = megatron_to_transformers[op_name] _UpperCamelCase = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": _UpperCamelCase = megatron_to_transformers[op_name] _UpperCamelCase = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. _UpperCamelCase = transformer["""final_layernorm.weight"""] _UpperCamelCase = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. _UpperCamelCase = word_embeddings # It should be done! return output_state_dict def lowercase__( )-> List[Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure" , action="store_true" ) parser.add_argument( "path_to_checkpoint" , type=_UpperCamelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , ) parser.add_argument( "--config_file" , default="" , type=_UpperCamelCase , help="An optional config json file describing the pre-trained model." , ) _UpperCamelCase = parser.parse_args() # Extract the basename. _UpperCamelCase = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" ) if args.path_to_checkpoint.endswith(".zip" ): with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict: _UpperCamelCase = torch.load(_UpperCamelCase , map_location="cpu" ) else: _UpperCamelCase = torch.load(args.path_to_checkpoint , map_location="cpu" ) _UpperCamelCase = input_state_dict.get("args" , _UpperCamelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: _UpperCamelCase = """gelu_fast""" elif ds_args.openai_gelu: _UpperCamelCase = """gelu_new""" else: _UpperCamelCase = """gelu""" else: # in the very early days this used to be "gelu_new" _UpperCamelCase = """gelu_new""" # Spell out all parameters in case the defaults change. _UpperCamelCase = GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_UpperCamelCase , summary_activation=_UpperCamelCase , summary_proj_to_labels=_UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_UpperCamelCase , use_cache=_UpperCamelCase , bos_token_id=50256 , eos_token_id=50256 , ) else: _UpperCamelCase = GPTaConfig.from_json_file(args.config_file ) _UpperCamelCase = ["""GPT2LMHeadModel"""] # Convert. print("Converting" ) _UpperCamelCase = convert_megatron_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_UpperCamelCase , _UpperCamelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: _UpperCamelCase = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": _UpperCamelCase = """gpt2""" elif tokenizer_type == "PretrainedFromHF": _UpperCamelCase = ds_args.tokenizer_name_or_path else: raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" ) else: _UpperCamelCase = """gpt2""" _UpperCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase ) _UpperCamelCase = type(_UpperCamelCase ).__name__ _UpperCamelCase = tokenizer_class # Store the config to file. print("Saving config" ) config.save_pretrained(_UpperCamelCase ) # Save tokenizer based on args print(f"Adding {tokenizer_class} tokenizer files" ) tokenizer.save_pretrained(_UpperCamelCase ) # Store the state_dict to file. _UpperCamelCase = os.path.join(_UpperCamelCase , "pytorch_model.bin" ) print(f"Saving checkpoint to \"{output_checkpoint_file}\"" ) torch.save(_UpperCamelCase , _UpperCamelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
138
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
681
0
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : jnp.ndarray _SCREAMING_SNAKE_CASE : jnp.ndarray class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' _SCREAMING_SNAKE_CASE : int _SCREAMING_SNAKE_CASE : Tuple[int] = (16, 32, 96, 2_56) _SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : str = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) SCREAMING_SNAKE_CASE : Optional[int] = [] for i in range(len(self.block_out_channels ) - 1 ): SCREAMING_SNAKE_CASE : int = self.block_out_channels[i] SCREAMING_SNAKE_CASE : Union[str, Any] = self.block_out_channels[i + 1] SCREAMING_SNAKE_CASE : List[str] = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[Any] = blocks SCREAMING_SNAKE_CASE : List[Any] = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : Any , _SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.conv_in(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[Any] = nn.silu(_SCREAMING_SNAKE_CASE ) for block in self.blocks: SCREAMING_SNAKE_CASE : Union[str, Any] = block(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.silu(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Dict = self.conv_out(_SCREAMING_SNAKE_CASE ) return embedding @flax_register_to_config class lowerCAmelCase__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : int = 32 _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False _SCREAMING_SNAKE_CASE : Tuple[int] = (3_20, 6_40, 12_80, 12_80) _SCREAMING_SNAKE_CASE : int = 2 _SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 _SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None _SCREAMING_SNAKE_CASE : int = 12_80 _SCREAMING_SNAKE_CASE : float = 0.0 _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa _SCREAMING_SNAKE_CASE : bool = True _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : str = "rgb" _SCREAMING_SNAKE_CASE : Tuple[int] = (16, 32, 96, 2_56) def _lowerCAmelCase ( self : List[str] , _SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE : Any = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE : int = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Any = (1, 3, self.sample_size * 8, self.sample_size * 8) SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Any = jax.random.split(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[Any] = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["params"] def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.block_out_channels SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE : str = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE : Optional[int] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE : str = FlaxTimestepEmbedding(_SCREAMING_SNAKE_CASE , dtype=self.dtype ) SCREAMING_SNAKE_CASE : str = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) SCREAMING_SNAKE_CASE : Optional[int] = self.only_cross_attention if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Dict = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Optional[Any] = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[Any] = block_out_channels[0] SCREAMING_SNAKE_CASE : Any = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE ) for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE : Union[str, Any] = output_channel SCREAMING_SNAKE_CASE : int = block_out_channels[i] SCREAMING_SNAKE_CASE : List[Any] = i == len(_SCREAMING_SNAKE_CASE ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE : Tuple = FlaxCrossAttnDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Dict = FlaxDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_SCREAMING_SNAKE_CASE ) for _ in range(self.layers_per_block ): SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE ) if not is_final_block: SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = down_blocks SCREAMING_SNAKE_CASE : Dict = controlnet_down_blocks # mid SCREAMING_SNAKE_CASE : Dict = block_out_channels[-1] SCREAMING_SNAKE_CASE : List[Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) SCREAMING_SNAKE_CASE : Tuple = nn.Conv( _SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str = 1.0 , _SCREAMING_SNAKE_CASE : str = True , _SCREAMING_SNAKE_CASE : Any = False , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.controlnet_conditioning_channel_order if channel_order == "bgr": SCREAMING_SNAKE_CASE : Any = jnp.flip(_SCREAMING_SNAKE_CASE , axis=1 ) # 1. time if not isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ): SCREAMING_SNAKE_CASE : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_SCREAMING_SNAKE_CASE , 0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_proj(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = self.time_embedding(_SCREAMING_SNAKE_CASE ) # 2. pre-process SCREAMING_SNAKE_CASE : str = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_in(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : Optional[int] = self.controlnet_cond_embedding(_SCREAMING_SNAKE_CASE ) sample += controlnet_cond # 3. down SCREAMING_SNAKE_CASE : Optional[Any] = (sample,) for down_block in self.down_blocks: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Optional[Any] = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) down_block_res_samples += res_samples # 4. mid SCREAMING_SNAKE_CASE : str = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) # 5. contronet blocks SCREAMING_SNAKE_CASE : List[Any] = () for down_block_res_sample, controlnet_block in zip(_SCREAMING_SNAKE_CASE , self.controlnet_down_blocks ): SCREAMING_SNAKE_CASE : Tuple = controlnet_block(_SCREAMING_SNAKE_CASE ) controlnet_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE : List[Any] = controlnet_down_block_res_samples SCREAMING_SNAKE_CASE : Tuple = self.controlnet_mid_block(_SCREAMING_SNAKE_CASE ) # 6. scaling SCREAMING_SNAKE_CASE : Optional[Any] = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_SCREAMING_SNAKE_CASE , mid_block_res_sample=_SCREAMING_SNAKE_CASE )
265
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): # Initialise PyTorch model lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
681
0
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCamelCase = logging.getLogger(__name__) _UpperCamelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCAmelCase : '''simple docstring''' _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__SCREAMING_SNAKE_CASE )} , ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __UpperCAmelCase : '''simple docstring''' _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The input training data file (a text file).'} ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': ( 'The input training data files (multiple files in glob format). ' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) _UpperCamelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) _UpperCamelCase : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) _UpperCamelCase : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) _UpperCamelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether ot not to use whole word mask.'} ) _UpperCamelCase : float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) _UpperCamelCase : float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' ) } , ) _UpperCamelCase : int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) _UpperCamelCase : int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' ) } , ) _UpperCamelCase : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = None , ): def _dataset(lowerCAmelCase , lowerCAmelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCAmelCase , file_path=lowerCAmelCase , block_size=args.block_size , ref_path=lowerCAmelCase , ) return LineByLineTextDataset(tokenizer=lowerCAmelCase , file_path=lowerCAmelCase , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCAmelCase , file_path=lowerCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCAmelCase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCAmelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def _A( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ : str = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: A__ : Dict = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ : Dict = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: A__ : int = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: A__ : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ : int = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: A__ : List[Any] = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) A__ : int = AutoModelWithLMHead.from_config(lowerCAmelCase ) model.resize_token_embeddings(len(lowerCAmelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: A__ : Union[str, Any] = tokenizer.max_len # Our input block size will be the max possible for the model else: A__ : Dict = min(data_args.block_size , tokenizer.max_len ) # Get datasets A__ : Dict = ( get_dataset(lowerCAmelCase , tokenizer=lowerCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) A__ : List[str] = ( get_dataset(lowerCAmelCase , tokenizer=lowerCAmelCase , evaluate=lowerCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": A__ : List[str] = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: A__ : int = DataCollatorForWholeWordMask( tokenizer=lowerCAmelCase , mlm_probability=data_args.mlm_probability ) else: A__ : List[Any] = DataCollatorForLanguageModeling( tokenizer=lowerCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A__ : int = Trainer( model=lowerCAmelCase , args=lowerCAmelCase , data_collator=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , prediction_loss_only=lowerCAmelCase , ) # Training if training_args.do_train: A__ : Union[str, Any] = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCAmelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ : Union[str, Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) A__ : List[str] = trainer.evaluate() A__ : List[Any] = math.exp(eval_output["""eval_loss"""] ) A__ : Dict = {"""perplexity""": perplexity} A__ : Optional[Any] = os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCAmelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCAmelCase , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCAmelCase ) return results def _A( lowerCAmelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
363
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
681
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class snake_case__ ( unittest.TestCase ): def A ( self , UpperCamelCase_ ) -> List[str]: """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): a_ : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(UpperCamelCase_ ) def A ( self ) -> str: """simple docstring""" a_ : List[str] = """sshleifer/tiny-gpt2""" a_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) a_ : Dict = TensorFlowBenchmark(UpperCamelCase_ ) a_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> List[str]: """simple docstring""" a_ : Any = """sgugger/tiny-distilbert-classification""" a_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , only_pretrain_model=UpperCamelCase_ , ) a_ : List[Any] = TensorFlowBenchmark(UpperCamelCase_ ) a_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> List[str]: """simple docstring""" a_ : Optional[int] = """sshleifer/tiny-gpt2""" a_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) a_ : Any = TensorFlowBenchmark(UpperCamelCase_ ) a_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> List[str]: """simple docstring""" a_ : List[Any] = """sshleifer/tiny-gpt2""" a_ : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ ) a_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) a_ : Optional[Any] = TensorFlowBenchmark(UpperCamelCase_ , [config] ) a_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> Tuple: """simple docstring""" a_ : Tuple = """sshleifer/tiny-gpt2""" a_ : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) a_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) a_ : Union[str, Any] = TensorFlowBenchmark(UpperCamelCase_ , [config] ) a_ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> Dict: """simple docstring""" a_ : Optional[int] = """sshleifer/tiny-gpt2""" a_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) a_ : int = TensorFlowBenchmark(UpperCamelCase_ ) a_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self ) -> int: """simple docstring""" a_ : int = """sshleifer/tiny-gpt2""" a_ : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ ) a_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) a_ : Any = TensorFlowBenchmark(UpperCamelCase_ , [config] ) a_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self ) -> Any: """simple docstring""" a_ : str = """patrickvonplaten/t5-tiny-random""" a_ : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ ) a_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase_ , ) a_ : List[Any] = TensorFlowBenchmark(UpperCamelCase_ , configs=[config] ) a_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def A ( self ) -> Optional[Any]: """simple docstring""" a_ : Optional[Any] = """sshleifer/tiny-gpt2""" a_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=UpperCamelCase_ , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) a_ : int = TensorFlowBenchmark(UpperCamelCase_ ) a_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self ) -> Optional[int]: """simple docstring""" a_ : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: a_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=UpperCamelCase_ , save_to_csv=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase_ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase_ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase_ , """env.csv""" ) , multi_process=UpperCamelCase_ , ) a_ : List[str] = TensorFlowBenchmark(UpperCamelCase_ ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCamelCase_ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase_ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCamelCase_ , """env.csv""" ) ).exists() ) def A ( self ) -> Optional[Any]: """simple docstring""" a_ : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(UpperCamelCase_ ): self.assertTrue(hasattr(UpperCamelCase_ , """sequential""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """cumulative""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """current""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: a_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=UpperCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase_ , """log.txt""" ) , log_print=UpperCamelCase_ , trace_memory_line_by_line=UpperCamelCase_ , eager_mode=UpperCamelCase_ , multi_process=UpperCamelCase_ , ) a_ : Tuple = TensorFlowBenchmark(UpperCamelCase_ ) a_ : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(UpperCamelCase_ , """log.txt""" ) ).exists() )
419
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
681
0
'''simple docstring''' from __future__ import annotations def __lowercase (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :int ): SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: SCREAMING_SNAKE_CASE : Optional[int] = i + 1 else: SCREAMING_SNAKE_CASE : Dict = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
507
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[str] = k_size // 2 lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) ) return g def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase : Dict = height - k_size + 1 lowerCamelCase : str = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase : List[Any] = 0 for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ): lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase ) lowerCamelCase : str = ravel(lowerCamelCase ) # reshape and get the dst image lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase ) return dst if __name__ == "__main__": # read original image _lowerCamelCase =imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _lowerCamelCase =gaussian_filter(gray, 3, sigma=1) _lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
681
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : Dict = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Any = [ '''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwinForImageClassification''', '''SwinForMaskedImageModeling''', '''SwinModel''', '''SwinPreTrainedModel''', '''SwinBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = [ '''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSwinForImageClassification''', '''TFSwinForMaskedImageModeling''', '''TFSwinModel''', '''TFSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
255
import pytest _lowerCamelCase ="""__dummy_dataset1__""" _lowerCamelCase =""" import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Union[str, Any] = dataset_loading_script_name lowerCamelCase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowerCamelCase : str = script_dir / F'''{script_name}.py''' with open(lowerCamelCase, """w""" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
681
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Union[str, Any] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} _SCREAMING_SNAKE_CASE : int = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } _SCREAMING_SNAKE_CASE : Dict = { '''google/rembert''': 256, } _SCREAMING_SNAKE_CASE : int = '''▁''' class a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Union[str, Any] = RemBertTokenizer def __init__( self : int , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict="[CLS]" , __SCREAMING_SNAKE_CASE : List[Any]="[SEP]" , __SCREAMING_SNAKE_CASE : Optional[Any]="<unk>" , __SCREAMING_SNAKE_CASE : str="[SEP]" , __SCREAMING_SNAKE_CASE : Any="<pad>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="[CLS]" , __SCREAMING_SNAKE_CASE : List[Any]="[MASK]" , **__SCREAMING_SNAKE_CASE : Any , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase_ = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) lowerCamelCase_ = do_lower_case lowerCamelCase_ = remove_space lowerCamelCase_ = keep_accents lowerCamelCase_ = vocab_file lowerCamelCase_ = False if not self.vocab_file else True def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] = None ) -> Tuple: lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] = None , __SCREAMING_SNAKE_CASE : Any = False ) -> Union[str, Any]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] = None ) -> Dict: lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict = None ) -> Optional[Any]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error('Vocabulary path ({}) should be a directory'.format(__SCREAMING_SNAKE_CASE ) ) return lowerCamelCase_ = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
549
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): _lowerCamelCase ={ """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: _lowerCamelCase ={ """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _a ( lowerCamelCase ): lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 ) lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase : Any = numpy_to_pil(lowerCamelCase ) return images def _a ( lowerCamelCase ): if images.ndim == 3: lowerCamelCase : Optional[Any] = images[None, ...] lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images] else: lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
681
0
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __magic_name__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class __snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase ): __a = XLMProphetNetTokenizer __a = False __a = True def __a ( self: Any ): super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = XLMProphetNetTokenizer(A_ , keep_accents=A_ ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self: Any ): __lowerCamelCase = """[PAD]""" __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def __a ( self: List[Any] ): __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """[PAD]""" ) self.assertEqual(vocab_keys[1] , """[CLS]""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(A_ ) , 10_12 ) def __a ( self: Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 10_12 ) def __a ( self: List[Any] ): __lowerCamelCase = XLMProphetNetTokenizer(A_ , keep_accents=A_ ) __lowerCamelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(A_ ) self.assertListEqual( A_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ ) self.assertListEqual( A_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ] , ) @cached_property def __a ( self: Union[str, Any] ): return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def __a ( self: Union[str, Any] ): __lowerCamelCase = """Hello World!""" __lowerCamelCase = [3_53_89, 66_72, 49, 2] self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) ) @slow def __a ( self: Union[str, Any] ): # fmt: off __lowerCamelCase = {"""input_ids""": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
281
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
681
0
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowercase__ : int = object() # For specifying empty leaf dict `{}` lowercase__ : Optional[int] = object() def __lowercase ( _a , _a ): snake_case_ : Optional[Any] = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(_a ) - len(_a ) + 1 ): snake_case_ : Dict = [x.match(_a ) for x, y in zip(_a , ks[i:] )] if matches and all(_a ): return True return False def __lowercase ( _a ): def replace(_a , _a ): for rule, replacement in rules: if _match(_a , _a ): return replacement return val return replace def __lowercase ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , _a )), (("transformer", "wte", "embedding"), P('''mp''' , _a )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_a , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , _a )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(_a , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , _a )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __lowercase ( _a ): snake_case_ : Optional[int] = _get_partition_rules() snake_case_ : Tuple = _replacement_rules(_a ) snake_case_ : List[str] = {k: _unmatched for k in flatten_dict(_a )} snake_case_ : List[str] = {k: replace(_a , _a ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(_a ) )
123
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
'''simple docstring''' import heapq def _A ( A ) -> Dict: lowercase : list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(A ,[-1 * len(A ), (key, value)] ) # chosen_vertices = set of chosen vertices lowercase : Tuple = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices lowercase : List[Any] = heapq.heappop(A )[1][0] chosen_vertices.add(A ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: lowercase : List[str] = elem[1][1].index(A ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(A ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Tuple = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
372
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCamelCase =HfArgumentParser(InitializationArguments) _lowerCamelCase =parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCamelCase ={ """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) _lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCamelCase =AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
681
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =GPTaTokenizer lowerCamelCase__ =GPTaTokenizerFast lowerCamelCase__ =True lowerCamelCase__ ={"""add_prefix_space""": True} lowerCamelCase__ =False def __UpperCamelCase ( self : Dict ) -> Optional[int]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE : List[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(a , range(len(a ) ) ) ) SCREAMING_SNAKE_CASE : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE : Optional[int] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(a ) ) def __UpperCamelCase ( self : List[Any] , **a : int ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **a ) def __UpperCamelCase ( self : Dict , **a : Optional[int] ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a ) def __UpperCamelCase ( self : Tuple , a : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = """lower newer""" SCREAMING_SNAKE_CASE : str = """lower newer""" return input_text, output_text def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE : Dict = """lower newer""" SCREAMING_SNAKE_CASE : Tuple = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(a , add_prefix_space=a ) self.assertListEqual(a , a ) SCREAMING_SNAKE_CASE : Optional[int] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer(add_prefix_space=a ) SCREAMING_SNAKE_CASE : List[Any] = """lower newer""" # Testing tokenization SCREAMING_SNAKE_CASE : int = tokenizer.tokenize(a , add_prefix_space=a ) SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a ) SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer(add_prefix_space=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(a , add_prefix_space=a ) SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) # Testing the unknown token SCREAMING_SNAKE_CASE : Union[str, Any] = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a ) , a ) def __UpperCamelCase ( self : Any , *a : List[Any] , **a : Optional[Any] ) -> List[str]: """simple docstring""" pass def __UpperCamelCase ( self : str , a : Tuple=15 ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a ) # Simple input SCREAMING_SNAKE_CASE : Tuple = """This is a simple input""" SCREAMING_SNAKE_CASE : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE : Union[str, Any] = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE : Union[str, Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" ) # Simple input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" ) # Simple input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , ) # Pair input self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" ) # Pair input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" ) # Pair input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , ) def __UpperCamelCase ( self : Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input SCREAMING_SNAKE_CASE : Optional[int] = """This is a simple input""" SCREAMING_SNAKE_CASE : str = ["""This is a simple input looooooooong""", """This is a simple input"""] SCREAMING_SNAKE_CASE : Optional[Any] = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE : Dict = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] SCREAMING_SNAKE_CASE : Dict = tokenizer.pad_token_id SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(a , padding="max_length" , max_length=30 , return_tensors="np" ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(a , padding=a , truncate=a , return_tensors="np" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(*a , padding="max_length" , max_length=60 , return_tensors="np" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer(a , padding=a , truncate=a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = """$$$""" SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a , add_bos_token=a ) SCREAMING_SNAKE_CASE : Tuple = """This is a simple input""" SCREAMING_SNAKE_CASE : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE : str = tokenizer.bos_token_id SCREAMING_SNAKE_CASE : Tuple = tokenizer(a ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(a ) self.assertEqual(out_s.input_ids[0] , a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __UpperCamelCase ( self : Any ) -> Any: """simple docstring""" pass def __UpperCamelCase ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [self.get_tokenizer(do_lower_case=a , add_bos_token=a )] for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE : Union[str, Any] = """Encode this.""" SCREAMING_SNAKE_CASE : Union[str, Any] = """This one too please.""" SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(a , add_special_tokens=a ) encoded_sequence += tokenizer.encode(a , add_special_tokens=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode_plus( a , a , add_special_tokens=a , return_special_tokens_mask=a , ) SCREAMING_SNAKE_CASE : Tuple = encoded_sequence_dict["""input_ids"""] SCREAMING_SNAKE_CASE : Any = encoded_sequence_dict["""special_tokens_mask"""] self.assertEqual(len(a ) , len(a ) ) SCREAMING_SNAKE_CASE : Tuple = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(a ) ] SCREAMING_SNAKE_CASE : Tuple = [x for x in filtered_sequence if x is not None] self.assertEqual(a , a ) @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a ) SCREAMING_SNAKE_CASE : str = """A photo of a cat""" SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode( a , ) self.assertEqual(a , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained("./test_opt" ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( a , ) self.assertEqual(a , [2, 250, 1345, 9, 10, 4758] ) def __UpperCamelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=a ) SCREAMING_SNAKE_CASE : List[str] = """A photo of a cat""" SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode( a , ) # Same as above self.assertEqual(a , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __UpperCamelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = """bos""" SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()["""bos"""] SCREAMING_SNAKE_CASE : List[Any] = """A photo of a cat""" SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode( a , ) # We changed the bos token self.assertEqual(a , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE : Dict = tokenizer.encode( a , ) self.assertEqual(a , [3_1957, 250, 1345, 9, 10, 4758] )
25
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , """sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class A_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ): _UpperCamelCase = size if size is not None else {"""shortest_edge""": 18} _UpperCamelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = num_frames _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std _UpperCamelCase = crop_size def a ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class A_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowerCAmelCase = VivitImageProcessor if is_vision_available() else None def a ( self ): _UpperCamelCase = VivitImageProcessingTester(self ) @property def a ( self ): return self.image_processor_tester.prepare_image_processor_dict() def a ( self ): _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_center_crop" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def a ( self ): _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def a ( self ): # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def a ( self ): # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def a ( self ): # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for video in video_inputs: self.assertIsInstance(A_ , A_ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
138
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
681
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ : int = logging.get_logger(__name__) A_ : Tuple = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : int = """deformable_detr""" _SCREAMING_SNAKE_CASE : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : int , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Any=3 , _SCREAMING_SNAKE_CASE : str=300 , _SCREAMING_SNAKE_CASE : Optional[Any]=1_024 , _SCREAMING_SNAKE_CASE : Union[str, Any]=6 , _SCREAMING_SNAKE_CASE : Optional[Any]=1_024 , _SCREAMING_SNAKE_CASE : Any=8 , _SCREAMING_SNAKE_CASE : List[Any]=6 , _SCREAMING_SNAKE_CASE : int=1_024 , _SCREAMING_SNAKE_CASE : int=8 , _SCREAMING_SNAKE_CASE : int=0.0 , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]="relu" , _SCREAMING_SNAKE_CASE : Any=256 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : List[str]=0.0 , _SCREAMING_SNAKE_CASE : Tuple=0.0 , _SCREAMING_SNAKE_CASE : str=0.0_2 , _SCREAMING_SNAKE_CASE : int=1.0 , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : Dict="sine" , _SCREAMING_SNAKE_CASE : List[Any]="resnet50" , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : Optional[int]=4 , _SCREAMING_SNAKE_CASE : List[Any]=4 , _SCREAMING_SNAKE_CASE : Dict=4 , _SCREAMING_SNAKE_CASE : List[Any]=False , _SCREAMING_SNAKE_CASE : Dict=300 , _SCREAMING_SNAKE_CASE : Optional[Any]=False , _SCREAMING_SNAKE_CASE : Any=1 , _SCREAMING_SNAKE_CASE : Optional[Any]=5 , _SCREAMING_SNAKE_CASE : List[str]=2 , _SCREAMING_SNAKE_CASE : Dict=1 , _SCREAMING_SNAKE_CASE : Any=1 , _SCREAMING_SNAKE_CASE : Optional[int]=5 , _SCREAMING_SNAKE_CASE : int=2 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Any=0.2_5 , _SCREAMING_SNAKE_CASE : Optional[Any]=False , **_SCREAMING_SNAKE_CASE : List[str] , ) -> Any: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=['stage4'] ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Tuple = backbone_config.get('model_type' ) SCREAMING_SNAKE_CASE : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : Tuple = config_class.from_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = use_timm_backbone SCREAMING_SNAKE_CASE : List[str] = backbone_config SCREAMING_SNAKE_CASE : Optional[int] = num_channels SCREAMING_SNAKE_CASE : List[str] = num_queries SCREAMING_SNAKE_CASE : Dict = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[Any] = d_model SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim SCREAMING_SNAKE_CASE : Any = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = decoder_ffn_dim SCREAMING_SNAKE_CASE : Optional[int] = decoder_layers SCREAMING_SNAKE_CASE : Any = decoder_attention_heads SCREAMING_SNAKE_CASE : Tuple = dropout SCREAMING_SNAKE_CASE : Any = attention_dropout SCREAMING_SNAKE_CASE : Any = activation_dropout SCREAMING_SNAKE_CASE : Optional[Any] = activation_function SCREAMING_SNAKE_CASE : Tuple = init_std SCREAMING_SNAKE_CASE : Tuple = init_xavier_std SCREAMING_SNAKE_CASE : str = encoder_layerdrop SCREAMING_SNAKE_CASE : Optional[int] = auxiliary_loss SCREAMING_SNAKE_CASE : Dict = position_embedding_type SCREAMING_SNAKE_CASE : Any = backbone SCREAMING_SNAKE_CASE : Any = use_pretrained_backbone SCREAMING_SNAKE_CASE : Tuple = dilation # deformable attributes SCREAMING_SNAKE_CASE : Optional[int] = num_feature_levels SCREAMING_SNAKE_CASE : int = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : int = two_stage SCREAMING_SNAKE_CASE : Optional[int] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Any = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' ) # Hungarian matcher SCREAMING_SNAKE_CASE : Dict = class_cost SCREAMING_SNAKE_CASE : Tuple = bbox_cost SCREAMING_SNAKE_CASE : Tuple = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Tuple = mask_loss_coefficient SCREAMING_SNAKE_CASE : str = dice_loss_coefficient SCREAMING_SNAKE_CASE : Any = bbox_loss_coefficient SCREAMING_SNAKE_CASE : str = giou_loss_coefficient SCREAMING_SNAKE_CASE : Dict = eos_coefficient SCREAMING_SNAKE_CASE : str = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def _lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" return self.encoder_attention_heads @property def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" return self.d_model def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Any = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type return output
265
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Optional[int] = """decision_transformer""" _UpperCAmelCase : str = ["""past_key_values"""] _UpperCAmelCase : Any = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ): lowerCamelCase : Optional[int] = state_dim lowerCamelCase : int = act_dim lowerCamelCase : int = hidden_size lowerCamelCase : Union[str, Any] = max_ep_len lowerCamelCase : Optional[int] = action_tanh lowerCamelCase : Any = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : List[Any] = n_layer lowerCamelCase : Dict = n_head lowerCamelCase : Optional[Any] = n_inner lowerCamelCase : Tuple = activation_function lowerCamelCase : Tuple = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Dict = attn_pdrop lowerCamelCase : Tuple = layer_norm_epsilon lowerCamelCase : Tuple = initializer_range lowerCamelCase : Tuple = scale_attn_weights lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCamelCase : List[str] = reorder_and_upcast_attn lowerCamelCase : Optional[Any] = bos_token_id lowerCamelCase : str = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
681
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class __UpperCAmelCase (__SCREAMING_SNAKE_CASE ): '''simple docstring''' _UpperCamelCase : Any = """van""" def __init__( self , snake_case_=224 , snake_case_=3 , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[64, 128, 320, 512] , snake_case_=[3, 3, 12, 3] , snake_case_=[8, 8, 4, 4] , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-6 , snake_case_=1E-2 , snake_case_=0.0 , snake_case_=0.0 , **snake_case_ , ): '''simple docstring''' super().__init__(**snake_case_ ) A__ : Tuple = image_size A__ : Union[str, Any] = num_channels A__ : Tuple = patch_sizes A__ : Optional[int] = strides A__ : str = hidden_sizes A__ : Any = depths A__ : int = mlp_ratios A__ : Tuple = hidden_act A__ : Dict = initializer_range A__ : List[Any] = layer_norm_eps A__ : Union[str, Any] = layer_scale_init_value A__ : int = drop_path_rate A__ : List[Any] = dropout_rate
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
681
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE : List[Any] = { "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Any = [ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTapasPreTrainedModel", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
419
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
681
0
'''simple docstring''' def __lowercase (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :List[Any] = False ): if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( '''Warning: upper bound of deterministic test is exceeded. ''' '''Pass allow_probable=True to allow probabilistic test. ''' '''A return value of True indicates a probable prime.''' ) # array bounds provided by analysis SCREAMING_SNAKE_CASE : int = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] SCREAMING_SNAKE_CASE : Tuple = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(_SCREAMING_SNAKE_CASE , 1 ): if n < _p: # then we have our last prime to check SCREAMING_SNAKE_CASE : List[str] = primes[:idx] break SCREAMING_SNAKE_CASE : Optional[int] = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: SCREAMING_SNAKE_CASE : List[Any] = False for r in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Tuple = pow(_SCREAMING_SNAKE_CASE , d * 2**r , _SCREAMING_SNAKE_CASE ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): SCREAMING_SNAKE_CASE : str = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def __lowercase (): assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
507
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
681
0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class __UpperCAmelCase : '''simple docstring''' def __init__( self , _A ): '''simple docstring''' _SCREAMING_SNAKE_CASE =data _SCREAMING_SNAKE_CASE =[0x6745_2301, 0xefcd_ab89, 0x98ba_dcfe, 0x1032_5476, 0xc3d2_e1f0] @staticmethod def UpperCamelCase_ ( _A , _A ): '''simple docstring''' return ((n << b) | (n >> (3_2 - b))) & 0xffff_ffff def UpperCamelCase_ ( self ): '''simple docstring''' _SCREAMING_SNAKE_CASE =b"""\x80""" + b"""\x00""" * (6_3 - (len(self.data ) + 8) % 6_4) _SCREAMING_SNAKE_CASE =self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) ) return padded_data def UpperCamelCase_ ( self ): '''simple docstring''' return [ self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 ) ] def UpperCamelCase_ ( self , _A ): '''simple docstring''' _SCREAMING_SNAKE_CASE =list(struct.unpack('''>16L''' , _A ) ) + [0] * 6_4 for i in range(1_6 , 8_0 ): _SCREAMING_SNAKE_CASE =self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 ) return w def UpperCamelCase_ ( self ): '''simple docstring''' _SCREAMING_SNAKE_CASE =self.padding() _SCREAMING_SNAKE_CASE =self.split_blocks() for block in self.blocks: _SCREAMING_SNAKE_CASE =self.expand_block(_A ) _SCREAMING_SNAKE_CASE =self.h for i in range(0 , 8_0 ): if 0 <= i < 2_0: _SCREAMING_SNAKE_CASE =(b & c) | ((~b) & d) _SCREAMING_SNAKE_CASE =0x5a82_7999 elif 2_0 <= i < 4_0: _SCREAMING_SNAKE_CASE =b ^ c ^ d _SCREAMING_SNAKE_CASE =0x6ed9_eba1 elif 4_0 <= i < 6_0: _SCREAMING_SNAKE_CASE =(b & c) | (b & d) | (c & d) _SCREAMING_SNAKE_CASE =0x8f1b_bcdc elif 6_0 <= i < 8_0: _SCREAMING_SNAKE_CASE =b ^ c ^ d _SCREAMING_SNAKE_CASE =0xca62_c1d6 _SCREAMING_SNAKE_CASE =( self.rotate(_A , 5 ) + f + e + k + expanded_block[i] & 0xffff_ffff, a, self.rotate(_A , 3_0 ), c, d, ) _SCREAMING_SNAKE_CASE =( self.h[0] + a & 0xffff_ffff, self.h[1] + b & 0xffff_ffff, self.h[2] + c & 0xffff_ffff, self.h[3] + d & 0xffff_ffff, self.h[4] + e & 0xffff_ffff, ) return ("{:08x}" * 5).format(*self.h ) def _lowerCAmelCase() -> Dict: _SCREAMING_SNAKE_CASE =B"""Test String""" assert SHAaHash(a ).final_hash() == hashlib.shaa(a ).hexdigest() # noqa: S324 def _lowerCAmelCase() -> int: _SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='''Process some strings or files''' ) parser.add_argument( '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) _SCREAMING_SNAKE_CASE =parser.parse_args() _SCREAMING_SNAKE_CASE =args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: _SCREAMING_SNAKE_CASE =f.read() else: _SCREAMING_SNAKE_CASE =bytes(a , '''utf-8''' ) print(SHAaHash(a ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
255
from __future__ import annotations def _a ( lowerCamelCase ): lowerCamelCase : Union[str, Any] = str(lowerCamelCase ) return n == n[::-1] def _a ( lowerCamelCase = 100_0000 ): lowerCamelCase : Any = 0 for i in range(1, lowerCamelCase ): if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
681
0
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy _SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def lowerCamelCase__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Any = None , _lowerCamelCase : List[str] = None , _lowerCamelCase : List[str] = None , _lowerCamelCase : str = None , _lowerCamelCase : int = None , _lowerCamelCase : int = False , ) -> Any: lowerCamelCase_ = bnb_quantization_config.load_in_abit lowerCamelCase_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' ) lowerCamelCase_ = [] # custom device map if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(device_map.keys() ) > 1: lowerCamelCase_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase_ = get_keys_to_not_convert(_lowerCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_lowerCamelCase ) lowerCamelCase_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase_ = [] lowerCamelCase_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_lowerCamelCase ) # compatibility with peft lowerCamelCase_ = load_in_abit lowerCamelCase_ = load_in_abit lowerCamelCase_ = get_parameter_device(_lowerCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' ) lowerCamelCase_ = replace_with_bnb_layers(_lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase ) # convert param to the right dtype lowerCamelCase_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase_ = name.replace('.weight' , '' ).replace('.bias' , '' ) lowerCamelCase_ = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_lowerCamelCase ): param.to(_lowerCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( F'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' 'We move the model to cuda.' ) return model elif weights_location is None: raise RuntimeError( F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): lowerCamelCase_ = replace_with_bnb_layers( _lowerCamelCase , _lowerCamelCase , modules_to_not_convert=_lowerCamelCase ) lowerCamelCase_ = get_quantized_model_device_map( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , max_memory=_lowerCamelCase , no_split_module_classes=_lowerCamelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase_ = True lowerCamelCase_ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=_lowerCamelCase , offload_state_dict=_lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_lowerCamelCase , device_map=_lowerCamelCase , offload_dir=_lowerCamelCase ) def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None ) -> int: if device_map is None: if torch.cuda.is_available(): lowerCamelCase_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(_lowerCamelCase , _lowerCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' ) lowerCamelCase_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase_ = {} lowerCamelCase_ = special_dtypes lowerCamelCase_ = no_split_module_classes lowerCamelCase_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase_ = get_balanced_memory( _lowerCamelCase , low_zero=(device_map == 'balanced_low_0') , max_memory=_lowerCamelCase , **_lowerCamelCase , ) lowerCamelCase_ = max_memory lowerCamelCase_ = infer_auto_device_map(_lowerCamelCase , **_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ): # check if don't have any quantized module on the cpu lowerCamelCase_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def lowerCamelCase__ ( _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str]=None , _lowerCamelCase : Tuple=None ) -> Optional[int]: if modules_to_not_convert is None: lowerCamelCase_ = [] lowerCamelCase_ = _replace_with_bnb_layers( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Tuple=None , ) -> Optional[int]: lowerCamelCase_ = False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase_ = [] current_key_name.append(_lowerCamelCase ) if isinstance(_lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase_ = """.""".join(_lowerCamelCase ) lowerCamelCase_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) lowerCamelCase_ = module.weight.data if module.bias is not None: lowerCamelCase_ = module.bias.data bnb_module.requires_grad_(_lowerCamelCase ) setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = True if len(list(module.children() ) ) > 0: lowerCamelCase_ = _replace_with_bnb_layers( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCamelCase__ ( _lowerCamelCase : int ) -> int: # Create a copy of the model with init_empty_weights(): lowerCamelCase_ = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase_ = find_tied_parameters(_lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCamelCase , _lowerCamelCase ): lowerCamelCase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase_ = sum(_lowerCamelCase , [] ) lowerCamelCase_ = len(_lowerCamelCase ) > 0 # Check if it is a base model lowerCamelCase_ = False if hasattr(_lowerCamelCase , 'base_model_prefix' ): lowerCamelCase_ = not hasattr(_lowerCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase_ = list(model.named_children() ) lowerCamelCase_ = [list_modules[-1][0]] # add last module together with tied weights lowerCamelCase_ = set(_lowerCamelCase ) - set(_lowerCamelCase ) lowerCamelCase_ = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase ) # remove ".weight" from the keys lowerCamelCase_ = [""".weight""", """.bias"""] lowerCamelCase_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase_ = name.replace(_lowerCamelCase , '' ) filtered_module_names.append(_lowerCamelCase ) return filtered_module_names def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> Optional[Any]: for m in model.modules(): if isinstance(_lowerCamelCase , bnb.nn.Linearabit ): return True return False def lowerCamelCase__ ( _lowerCamelCase : Optional[int] ) -> Optional[int]: return next(parameter.parameters() ).device def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ) -> List[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , 0 , dtype=_lowerCamelCase , value=_lowerCamelCase ) lowerCamelCase_ = param_name lowerCamelCase_ = model if "." in tensor_name: lowerCamelCase_ = tensor_name.split('.' ) for split in splits[:-1]: lowerCamelCase_ = getattr(_lowerCamelCase , _lowerCamelCase ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) lowerCamelCase_ = new_module lowerCamelCase_ = splits[-1] # offload weights lowerCamelCase_ = False offload_weight(module._parameters[tensor_name] , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _lowerCamelCase , index=_lowerCamelCase , ) else: offload_weight(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase ) offload_weight(_lowerCamelCase , param_name.replace('weight' , 'SCB' ) , _lowerCamelCase , index=_lowerCamelCase ) set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , 'meta' , dtype=_lowerCamelCase , value=torch.empty(*param.size() ) )
549
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
681
0
"""simple docstring""" def a_ ( lowercase__ :Dict ): if not numbers: return 0 if not isinstance(lowercase__, (list, tuple) ) or not all( isinstance(lowercase__, lowercase__ ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) __lowerCamelCase = numbers[0] for i in range(1, len(lowercase__ ) ): # update the maximum and minimum subarray products __lowerCamelCase = numbers[i] if number < 0: __lowerCamelCase = min_till_now, max_till_now __lowerCamelCase = max(lowercase__, max_till_now * number ) __lowerCamelCase = min(lowercase__, min_till_now * number ) # update the maximum product found till now __lowerCamelCase = max(lowercase__, lowercase__ ) return max_prod
281
def _a ( lowerCamelCase ): if num < 0: return False lowerCamelCase : int = num lowerCamelCase : int = 0 while num > 0: lowerCamelCase : str = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
681
0
"""simple docstring""" from __future__ import annotations lowercase__ : Optional[int] = list[tuple[int, int]] lowercase__ : int = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase__ : Dict = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class _UpperCAmelCase : def __init__( self : int , lowercase_ : int , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , ): snake_case_ : List[Any] = pos_x snake_case_ : Dict = pos_y snake_case_ : str = (pos_y, pos_x) snake_case_ : List[str] = goal_x snake_case_ : List[Any] = goal_y snake_case_ : Optional[Any] = g_cost snake_case_ : str = parent snake_case_ : Dict = self.calculate_heuristic() def _snake_case ( self : Tuple ): snake_case_ : Optional[Any] = abs(self.pos_x - self.goal_x ) snake_case_ : Any = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : Tuple , lowercase_ : Tuple ): return self.f_cost < other.f_cost class _UpperCAmelCase : def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ): snake_case_ : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ ) snake_case_ : Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , lowercase_ ) snake_case_ : Any = [self.start] snake_case_ : list[Node] = [] snake_case_ : Optional[int] = False def _snake_case ( self : List[Any] ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() snake_case_ : List[str] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: snake_case_ : List[str] = True return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) snake_case_ : Optional[int] = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path snake_case_ : Any = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) if not self.reached: return [self.start.pos] return None def _snake_case ( self : Optional[int] , lowercase_ : Optional[int] ): snake_case_ : Dict = [] for action in delta: snake_case_ : str = parent.pos_x + action[1] snake_case_ : Optional[int] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) ) return successors def _snake_case ( self : Any , lowercase_ : Optional[Any] ): snake_case_ : Tuple = node snake_case_ : Dict = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) snake_case_ : Any = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase__ : Tuple = (0, 0) lowercase__ : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('''------''') lowercase__ : Tuple = GreedyBestFirst(init, goal) lowercase__ : List[str] = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase__ : List[str] = 2 for elem in grid: print(elem)
123
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
681
0
'''simple docstring''' import pytest lowerCAmelCase : Optional[Any] = """__dummy_dataset1__""" lowerCAmelCase : Optional[Any] = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _A ( ) -> Union[str, Any]: return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _A ( ) -> Tuple: return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _A ( A ,A ,A ) -> str: lowercase : Union[str, Any] = dataset_loading_script_name lowercase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=A ) lowercase : str = script_dir / F'''{script_name}.py''' with open(A ,"w" ) as f: f.write(A ) return str(A )
372
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}, 'tokenizer_file': { 'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json' }, } a_ = { 'google/pegasus-xsum': 512, } class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PegasusTokenizer lowerCamelCase__ =["""input_ids""", """attention_mask"""] def __init__( self : int , a : Any=None , a : str=None , a : List[str]="<pad>" , a : Optional[Any]="</s>" , a : List[Any]="<unk>" , a : Dict="<mask_2>" , a : Union[str, Any]="<mask_1>" , a : str=None , a : Any=103 , **a : Optional[int] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = offset if additional_special_tokens is not None: if not isinstance(a , a ): raise TypeError( F"additional_special_tokens should be of type {type(a )}, but is" F" {type(a )}" ) SCREAMING_SNAKE_CASE : Optional[int] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(a ) , self.offset - 1 ) ] if len(set(a ) ) != len(a ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." ) SCREAMING_SNAKE_CASE : int = additional_special_tokens_extended else: SCREAMING_SNAKE_CASE : List[str] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )] super().__init__( a , tokenizer_file=a , pad_token=a , eos_token=a , unk_token=a , mask_token=a , mask_token_sent=a , offset=a , additional_special_tokens=a , **a , ) SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def __UpperCamelCase ( self : List[Any] , a : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" ) return [1 if x in all_special_ids else 0 for x in seq] def __UpperCamelCase ( self : Optional[int] , a : Optional[int] , a : Optional[Any] = None , a : Optional[Any] = False ) -> int: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(a ) elif token_ids_a is None: return self._special_token_mask(a ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def __UpperCamelCase ( self : List[str] , a : int , a : Any=None ) -> Union[str, Any]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : List[str] , a : Tuple , a : Tuple = None ) -> List[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(a ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE : int = os.path.join( a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
25
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) snake_case_ : List[str] = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[Any] = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Any = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : List[str] = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
138
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
681
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class lowerCAmelCase__ : '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None _SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def _lowerCAmelCase ( cls : Optional[int] ) -> List[str]: """simple docstring""" return cls() @dataclass class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : jnp.ndarray _SCREAMING_SNAKE_CASE : jnp.ndarray _SCREAMING_SNAKE_CASE : KarrasVeSchedulerState class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _lowerCAmelCase ( self : int ) -> int: """simple docstring""" return True @register_to_config def __init__( self : Dict , _SCREAMING_SNAKE_CASE : int = 0.0_2 , _SCREAMING_SNAKE_CASE : Optional[int] = 100 , _SCREAMING_SNAKE_CASE : str = 1.0_0_7 , _SCREAMING_SNAKE_CASE : Tuple = 80 , _SCREAMING_SNAKE_CASE : Dict = 0.0_5 , _SCREAMING_SNAKE_CASE : Union[str, Any] = 50 , ) -> Optional[Any]: """simple docstring""" pass def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return KarrasVeSchedulerState.create() def _lowerCAmelCase ( self : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] = () ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = jnp.arange(0 , _SCREAMING_SNAKE_CASE )[::-1].copy() SCREAMING_SNAKE_CASE : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=_SCREAMING_SNAKE_CASE , schedule=jnp.array(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) , timesteps=_SCREAMING_SNAKE_CASE , ) def _lowerCAmelCase ( self : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , ) -> Union[str, Any]: """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: SCREAMING_SNAKE_CASE : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: SCREAMING_SNAKE_CASE : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) SCREAMING_SNAKE_CASE : List[Any] = random.split(_SCREAMING_SNAKE_CASE , num=1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.s_noise * random.normal(key=_SCREAMING_SNAKE_CASE , shape=sample.shape ) SCREAMING_SNAKE_CASE : List[Any] = sigma + gamma * sigma SCREAMING_SNAKE_CASE : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _lowerCAmelCase ( self : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] = True , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = sample_hat + sigma_hat * model_output SCREAMING_SNAKE_CASE : Dict = (sample_hat - pred_original_sample) / sigma_hat SCREAMING_SNAKE_CASE : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , state=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = True , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = sample_prev + sigma_prev * model_output SCREAMING_SNAKE_CASE : str = (sample_prev - pred_original_sample) / sigma_prev SCREAMING_SNAKE_CASE : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , state=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" raise NotImplementedError()
265
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): # Initialise PyTorch model lowerCamelCase : str = MobileBertConfig.from_json_file(lowerCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : Tuple = MobileBertForPreTraining(lowerCamelCase ) # Load weights from tf checkpoint lowerCamelCase : Tuple = load_tf_weights_in_mobilebert(lowerCamelCase, lowerCamelCase, lowerCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--mobilebert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained MobileBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
681
0
"""simple docstring""" import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase = "▁" _UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __UpperCAmelCase (__SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : str = BertGenerationTokenizer _UpperCamelCase : Tuple = False _UpperCamelCase : List[Any] = True def lowerCamelCase ( self ): '''simple docstring''' super().setUp() A__ : int = BertGenerationTokenizer(snake_case_ , keep_accents=snake_case_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = """<s>""" A__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(snake_case_ ) , 1_002 ) def lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Tuple = BertGenerationTokenizer(snake_case_ , keep_accents=snake_case_ ) A__ : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [285, 46, 10, 170, 382] , ) A__ : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual( snake_case_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) A__ : int = tokenizer.convert_ids_to_tokens(snake_case_ ) self.assertListEqual( snake_case_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowerCamelCase ( self ): '''simple docstring''' return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : List[Any] = """Hello World!""" A__ : Any = [18_536, 2_260, 101] self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : str = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) A__ : str = [ 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, ] self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) ) @require_torch @slow def lowerCamelCase ( self ): '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence A__ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] A__ : Dict = """ """.join(snake_case_ ) A__ : Any = self.big_tokenizer.encode_plus(snake_case_ , return_tensors="""pt""" , return_token_type_ids=snake_case_ ) A__ : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=snake_case_ ) A__ : Tuple = BertGenerationConfig() A__ : Optional[int] = BertGenerationEncoder(snake_case_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**snake_case_ ) model(**snake_case_ ) @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : Any = {"""input_ids""": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
363
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
681
0
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ): """simple docstring""" a_ : int = coefficient_matrix.shape a_ : List[str] = constant_matrix.shape if rowsa != colsa: a_ : Optional[int] = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(SCREAMING_SNAKE_CASE_ ) if colsa != 1: a_ : List[str] = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(SCREAMING_SNAKE_CASE_ ) if rowsa != rowsa: a_ : int = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ F"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) != rowsa: a_ : Optional[Any] = ( """Number of initial values must be equal to number of rows in coefficient """ F"""matrix but received {len(SCREAMING_SNAKE_CASE_ )} and {rowsa}""" ) raise ValueError(SCREAMING_SNAKE_CASE_ ) if iterations <= 0: raise ValueError("""Iterations must be at least 1""" ) a_ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) a_ : Optional[int] = table.shape strictly_diagonally_dominant(SCREAMING_SNAKE_CASE_ ) # Iterates the whole matrix for given number of times for _ in range(SCREAMING_SNAKE_CASE_ ): a_ : str = [] for row in range(SCREAMING_SNAKE_CASE_ ): a_ : Dict = 0 for col in range(SCREAMING_SNAKE_CASE_ ): if col == row: a_ : Any = table[row][col] elif col == cols - 1: a_ : Any = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] a_ : Optional[int] = (temp + val) / denom new_val.append(SCREAMING_SNAKE_CASE_ ) a_ : Optional[int] = new_val return [float(SCREAMING_SNAKE_CASE_ ) for i in new_val] def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ): """simple docstring""" a_ : int = table.shape a_ : List[str] = True for i in range(0 , SCREAMING_SNAKE_CASE_ ): a_ : Dict = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
419
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
681
0
'''simple docstring''' snake_case_ = """ # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git """ snake_case_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] snake_case_ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
507
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[str] = k_size // 2 lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) ) return g def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width lowerCamelCase : Dict = height - k_size + 1 lowerCamelCase : str = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) ) lowerCamelCase : List[Any] = 0 for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ): lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] ) lowerCamelCase : Union[str, Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase ) lowerCamelCase : str = ravel(lowerCamelCase ) # reshape and get the dst image lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase ) return dst if __name__ == "__main__": # read original image _lowerCamelCase =imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _lowerCamelCase =gaussian_filter(gray, 3, sigma=1) _lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
681
0
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _lowerCAmelCase(a : int , a : List[str] , a : List[str]=None ) -> List[Any]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" _SCREAMING_SNAKE_CASE =nn.Parameter(a ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" _SCREAMING_SNAKE_CASE =nn.Parameter(a ) def _lowerCAmelCase(a : int , a : Any , a : Union[str, Any] ) -> Tuple: # set torch weights for 1-to-1 comparison _SCREAMING_SNAKE_CASE =np.asarray(weights[0] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[1] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(a ).transpose(1 , 2 ).contiguous().view(-1 , a ) , ) set_param( torch_layer.self_attention.value , torch.tensor(a ).transpose(1 , 2 ).contiguous().view(-1 , a ) , ) set_param( torch_layer.output.dense , torch.tensor(a ).view(-1 , a ).contiguous().transpose(0 , 1 ) , ) def _lowerCAmelCase(a : Union[str, Any] , a : Dict , a : Dict ) -> int: # set torch weights for 1-to-1 comparison _SCREAMING_SNAKE_CASE =np.asarray(weights[0] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[1] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[2] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(a ).transpose(1 , 2 ).contiguous().view(-1 , a ) , ) set_param( torch_layer.self_attention.key , torch.tensor(a ).transpose(1 , 2 ).contiguous().view(-1 , a ) , ) set_param( torch_layer.self_attention.value , torch.tensor(a ).transpose(1 , 2 ).contiguous().view(-1 , a ) , ) set_param( torch_layer.output.dense , torch.tensor(a ).view(-1 , a ).contiguous().transpose(0 , 1 ) , ) def _lowerCAmelCase(a : List[Any] , a : str , a : Any ) -> Optional[int]: # layernorm 1 _SCREAMING_SNAKE_CASE =weights[0][0][0] _SCREAMING_SNAKE_CASE =np.asarray(layer_norm_a[0] ) _SCREAMING_SNAKE_CASE =np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(a ) , torch.tensor(a ) , ) # lsh weights + output _SCREAMING_SNAKE_CASE =weights[0][1] if len(a ) < 4: set_layer_weights_in_torch_lsh(a , torch_block.attention , a ) else: set_layer_weights_in_torch_local(a , torch_block.attention , a ) # intermediate weighs _SCREAMING_SNAKE_CASE =weights[2][0][1][2] # Chunked Feed Forward if len(a ) == 4: _SCREAMING_SNAKE_CASE =intermediate_weights[2] # layernorm 2 _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[0][0] ) _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(a ) , torch.tensor(a ) , ) # intermediate dense _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[1][0] ) _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(a ).transpose(0 , 1 ).contiguous() , torch.tensor(a ) , ) # intermediate out _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[4][0] ) _SCREAMING_SNAKE_CASE =np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(a ).transpose(0 , 1 ).contiguous() , torch.tensor(a ) , ) def _lowerCAmelCase(a : Optional[Any] , a : str , a : List[Any] ) -> Optional[int]: # reformer model _SCREAMING_SNAKE_CASE =torch_model.reformer # word embeds _SCREAMING_SNAKE_CASE =np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(a ) , ) if isinstance(weights[3] , a ): _SCREAMING_SNAKE_CASE =torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _SCREAMING_SNAKE_CASE =np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" _SCREAMING_SNAKE_CASE =nn.Parameter(torch.tensor(a ) ) _SCREAMING_SNAKE_CASE =weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( a ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _SCREAMING_SNAKE_CASE =trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(a , a , a ) # output layer norm _SCREAMING_SNAKE_CASE =np.asarray(weights[7][0] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(a ) , torch.tensor(a ) , ) # output embeddings _SCREAMING_SNAKE_CASE =np.asarray(weights[9][0] ) _SCREAMING_SNAKE_CASE =np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(a ).transpose(0 , 1 ).contiguous() , torch.tensor(a ) , ) def _lowerCAmelCase(a : int , a : Tuple , a : Union[str, Any] ) -> Any: # Initialise PyTorch model _SCREAMING_SNAKE_CASE =ReformerConfig.from_json_file(a ) print(f"""Building PyTorch model from configuration: {config}""" ) _SCREAMING_SNAKE_CASE =ReformerModelWithLMHead(a ) with open(a , '''rb''' ) as f: _SCREAMING_SNAKE_CASE =pickle.load(a )["""weights"""] set_model_weights_in_torch(a , a , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , a ) if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
255
import pytest _lowerCamelCase ="""__dummy_dataset1__""" _lowerCamelCase =""" import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _a ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Union[str, Any] = dataset_loading_script_name lowerCamelCase : Dict = tmp_path / """datasets""" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowerCamelCase : str = script_dir / F'''{script_name}.py''' with open(lowerCamelCase, """w""" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
681
0
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class a ( unittest.TestCase ): def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Dict=30 , __SCREAMING_SNAKE_CASE : Optional[Any]=400 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=1 / 255 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[Any]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCamelCase_ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = num_channels lowerCamelCase_ = min_resolution lowerCamelCase_ = max_resolution lowerCamelCase_ = do_resize lowerCamelCase_ = size lowerCamelCase_ = do_rescale lowerCamelCase_ = rescale_factor lowerCamelCase_ = do_normalize lowerCamelCase_ = image_mean lowerCamelCase_ = image_std lowerCamelCase_ = do_pad def UpperCamelCase ( self : Any ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Optional[Any]: if not batched: lowerCamelCase_ = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): lowerCamelCase_ = image.size else: lowerCamelCase_ = image.shape[1], image.shape[2] if w < h: lowerCamelCase_ = int(self.size['shortest_edge'] * h / w ) lowerCamelCase_ = self.size["""shortest_edge"""] elif w > h: lowerCamelCase_ = self.size["""shortest_edge"""] lowerCamelCase_ = int(self.size['shortest_edge'] * w / h ) else: lowerCamelCase_ = self.size["""shortest_edge"""] lowerCamelCase_ = self.size["""shortest_edge"""] else: lowerCamelCase_ = [] for image in image_inputs: lowerCamelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE : Union[str, Any] = DetrImageProcessor if is_vision_available() else None def UpperCamelCase ( self : Tuple ) -> Any: lowerCamelCase_ = DetrImageProcessingTester(self ) @property def UpperCamelCase ( self : str ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self : Dict ) -> Optional[int]: lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'rescale_factor' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_pad' ) ) def UpperCamelCase ( self : str ) -> List[str]: lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : Optional[Any] ) -> str: pass def UpperCamelCase ( self : int ) -> Tuple: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self : List[str] ) -> int: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: # Initialize image_processing lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values lowerCamelCase_ = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase ( self : Optional[int] ) -> List[str]: # prepare image and target lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: lowerCamelCase_ = json.loads(f.read() ) lowerCamelCase_ = {"""image_id""": 39769, """annotations""": target} # encode them lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' ) lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values lowerCamelCase_ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area lowerCamelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) ) # verify boxes lowerCamelCase_ = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id lowerCamelCase_ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) ) # verify orig_size lowerCamelCase_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) ) # verify size lowerCamelCase_ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) ) @slow def UpperCamelCase ( self : Tuple ) -> str: # prepare image, target and masks_path lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: lowerCamelCase_ = json.loads(f.read() ) lowerCamelCase_ = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' ) lowerCamelCase_ = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values lowerCamelCase_ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify area lowerCamelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __SCREAMING_SNAKE_CASE ) ) # verify boxes lowerCamelCase_ = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __SCREAMING_SNAKE_CASE , atol=1e-3 ) ) # verify image_id lowerCamelCase_ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __SCREAMING_SNAKE_CASE ) ) # verify masks lowerCamelCase_ = 822873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __SCREAMING_SNAKE_CASE ) # verify orig_size lowerCamelCase_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __SCREAMING_SNAKE_CASE ) ) # verify size lowerCamelCase_ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __SCREAMING_SNAKE_CASE ) )
549
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""): _lowerCamelCase ={ """linear""": PIL.Image.Resampling.BILINEAR, """bilinear""": PIL.Image.Resampling.BILINEAR, """bicubic""": PIL.Image.Resampling.BICUBIC, """lanczos""": PIL.Image.Resampling.LANCZOS, """nearest""": PIL.Image.Resampling.NEAREST, } else: _lowerCamelCase ={ """linear""": PIL.Image.LINEAR, """bilinear""": PIL.Image.BILINEAR, """bicubic""": PIL.Image.BICUBIC, """lanczos""": PIL.Image.LANCZOS, """nearest""": PIL.Image.NEAREST, } def _a ( lowerCamelCase ): lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 ) lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy() lowerCamelCase : Any = numpy_to_pil(lowerCamelCase ) return images def _a ( lowerCamelCase ): if images.ndim == 3: lowerCamelCase : Optional[Any] = images[None, ...] lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images] else: lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images] return pil_images
681
0
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( 'The `inpainting.py` script is outdated. Please use directly `from diffusers import' ' StableDiffusionInpaintPipeline` instead.' )
281
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
681
0