code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A_ : """simple docstring""" a__ = XGLMConfig a__ = {} a__ = '''gelu''' def __init__( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple=14 , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :Tuple=32 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :Any=37 , lowerCAmelCase__ :List[Any]="gelu" , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=512 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , ) -> Tuple: '''simple docstring''' snake_case_ : Dict = parent snake_case_ : str = batch_size snake_case_ : Tuple = seq_length snake_case_ : List[Any] = is_training snake_case_ : Tuple = use_input_mask snake_case_ : Dict = use_labels snake_case_ : Tuple = vocab_size snake_case_ : Any = d_model snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : str = ffn_dim snake_case_ : int = activation_function snake_case_ : Dict = activation_dropout snake_case_ : List[Any] = attention_dropout snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : List[str] = initializer_range snake_case_ : List[Any] = None snake_case_ : Dict = 0 snake_case_ : Optional[Any] = 2 snake_case_ : int = 1 def _A ( self :Any ) -> Union[str, Any]: '''simple 
docstring''' return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : Dict = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) snake_case_ : Optional[int] = None if self.use_input_mask: snake_case_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : str = self.get_config() snake_case_ : Tuple = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _A ( self :List[Any] ) -> str: '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , ) def _A ( self :str ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : Optional[Any] = config_and_inputs snake_case_ : List[Any] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () a__ = (TFXGLMForCausalLM,) if is_tf_available() else () a__ = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) a__ = False a__ = False a__ = False def _A ( self :List[Any] ) -> int: '''simple docstring''' snake_case_ 
: Union[str, Any] = TFXGLMModelTester(self ) snake_case_ : Any = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 ) def _A ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @slow def _A ( self :Dict ) -> Optional[Any]: '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : List[Any] = TFXGLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _A ( self :Optional[Any] ) -> Dict: '''simple docstring''' super().test_resize_token_embeddings() @require_tf class A_ (unittest.TestCase ): """simple docstring""" @slow def _A ( self :Any , lowerCAmelCase__ :Any=True ) -> str: '''simple docstring''' snake_case_ : Any = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) snake_case_ : List[str] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off snake_case_ : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on snake_case_ : str = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ ) @slow def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) snake_case_ : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) snake_case_ : List[Any] = tokenizer("Today is a nice day and" , return_tensors="tf" ) snake_case_ : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): snake_case_ : Tuple = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] ) snake_case_ : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : Dict = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) snake_case_ : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) snake_case_ : str = "left" # use different length sentences to test batching snake_case_ : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] snake_case_ : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = inputs["input_ids"] snake_case_ : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 ) snake_case_ : Tuple = tokenizer(sentences[0] , return_tensors="tf" ).input_ids snake_case_ : List[str] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 ) snake_case_ : Union[str, Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids snake_case_ : Tuple = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 ) snake_case_ : int = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : List[str] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
653
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Dict = logging.get_logger(__name__) # TODO Update this __lowerCamelCase : int = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class A_ (a_ ): """simple docstring""" a__ = '''esm''' def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : str = vocab_size snake_case_ : str = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Tuple = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : str = initializer_range snake_case_ : List[Any] = layer_norm_eps snake_case_ : str = position_embedding_type snake_case_ : Optional[int] = use_cache snake_case_ : str = emb_layer_norm_before snake_case_ : List[Any] = token_dropout snake_case_ : str = is_folding_model if is_folding_model: if esmfold_config is 
None: logger.info("No esmfold_config supplied for folding model, using default values." ) snake_case_ : Optional[Any] = EsmFoldConfig() elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ ) snake_case_ : Optional[Any] = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) snake_case_ : List[str] = get_default_vocab_list() else: snake_case_ : List[str] = vocab_list else: snake_case_ : List[Any] = None snake_case_ : int = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" ) def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' snake_case_ : Any = super().to_dict() if isinstance(self.esmfold_config , lowerCAmelCase__ ): snake_case_ : Optional[int] = self.esmfold_config.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = None a__ = True a__ = False a__ = False a__ = False a__ = 0 a__ = True a__ = False a__ = 128 a__ = None def _A ( self :Dict ) -> int: '''simple docstring''' if self.trunk is None: snake_case_ : Dict = TrunkConfig() elif isinstance(self.trunk , lowerCAmelCase__ ): snake_case_ : int = TrunkConfig(**self.trunk ) def _A ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = asdict(self ) snake_case_ : Optional[int] = self.trunk.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = 48 a__ = 1024 a__ = 128 a__ = 32 a__ = 32 a__ = 32 a__ = 0 a__ = 0 a__ = False a__ = 4 a__ = 128 a__ = None def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.structure_module is None: snake_case_ : Optional[int] = StructureModuleConfig() elif isinstance(self.structure_module , lowerCAmelCase__ ): snake_case_ : List[str] = 
StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def _A ( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : int = asdict(self ) snake_case_ : Dict = self.structure_module.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = 384 a__ = 128 a__ = 16 a__ = 128 a__ = 12 a__ = 4 a__ = 8 a__ = 0.1 a__ = 8 a__ = 1 a__ = 2 a__ = 7 a__ = 10 a__ = 1E-8 a__ = 1E5 def _A ( self :Dict ) -> Dict: '''simple docstring''' return asdict(self ) def __UpperCAmelCase ( )-> 
int: """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
653
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __lowerCamelCase : str = None __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __lowerCamelCase : Any = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } __lowerCamelCase : List[str] = { '''camembert-base''': 512, } __lowerCamelCase : int = '''▁''' class A_ (a_ ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ['''input_ids''', '''attention_mask'''] a__ = CamembertTokenizer def __init__( self :int , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Tuple="<s>" , lowerCAmelCase__ :Tuple="</s>" , lowerCAmelCase__ :Any="</s>" , lowerCAmelCase__ :Optional[int]="<s>" , lowerCAmelCase__ :Optional[Any]="<unk>" , lowerCAmelCase__ :int="<pad>" , lowerCAmelCase__ :List[str]="<mask>" , lowerCAmelCase__ :Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCAmelCase__ :Tuple , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , 
unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , ) snake_case_ : int = vocab_file snake_case_ : str = False if not self.vocab_file else True def _A ( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Dict = [self.cls_token_id] snake_case_ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _A ( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _A ( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : List[str] = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
653
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
1
'''simple docstring''' from collections import deque from .hash_table import HashTable class A_ (a_ ): """simple docstring""" def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]: '''simple docstring''' super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(lowerCAmelCase__ ) snake_case_ : Tuple = self.values[key] def _A ( self :int ) -> Dict: '''simple docstring''' return ( sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0 ): return key return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
653
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCamelCase : Optional[int] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict: '''simple docstring''' snake_case_ : List[str] = d_model snake_case_ : Dict = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[Any] = prediction_length snake_case_ : str = context_length snake_case_ : Tuple = cardinality snake_case_ : List[str] = num_time_features snake_case_ : Optional[Any] = lags_sequence snake_case_ : Union[str, Any] = embedding_dimension snake_case_ : Optional[Any] = is_training snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : int = 
intermediate_size snake_case_ : Any = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = context_length snake_case_ : Any = prediction_length + label_length snake_case_ : Union[str, Any] = label_length snake_case_ : List[Any] = moving_average snake_case_ : str = autocorrelation_factor def _A ( self :List[Any] ) -> Any: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = config.context_length + max(config.lags_sequence ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] ) snake_case_ : int = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _A ( self :Dict ) -> Tuple: '''simple docstring''' snake_case_ : str = self.get_config() snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ ) return config, inputs_dict def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() snake_case_ : Optional[int] = model(**lowerCAmelCase__ ) snake_case_ : Any = outputs.encoder_last_hidden_state snake_case_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ ) snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) snake_case_ : List[Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) snake_case_ : Any = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) snake_case_ : List[str] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) snake_case_ : Optional[Any] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) snake_case_ : Any = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = model.get_decoder() decoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Tuple = decoder( trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a__ = (AutoformerForPrediction,) if is_torch_available() else () a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = AutoformerModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _A ( self :List[str] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ : List[Any] = 
model_class(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _A ( self :str ) -> str: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) ) # The main input is the name of the argument after `self` snake_case_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[Any] = [*signature.parameters.keys()] snake_case_ : Dict = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCAmelCase__ 
)] , lowerCAmelCase__ ) def _A ( self :int ) -> Any: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = True snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ ) snake_case_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: snake_case_ : Any = True snake_case_ : Any = False snake_case_ : Dict = True snake_case_ : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.encoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) snake_case_ : Tuple = len(lowerCAmelCase__ ) snake_case_ : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: 
correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # decoder attentions snake_case_ : Optional[int] = outputs.decoder_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions snake_case_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine snake_case_ : Optional[int] = True snake_case_ : List[Any] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) ) snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _A ( self :Any ) -> Optional[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int: """simple docstring""" snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" 
) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ ) return batch @require_torch @slow class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : List[str] = prepare_batch() with torch.no_grad(): snake_case_ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] snake_case_ : Optional[int] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Tuple = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 
1.2_9_7_0]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : str = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Optional[Any] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ ) snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
653
1
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=sys.maxsize ) -> str: '''simple docstring''' snake_case_ : str = "bilinear" snake_case_ : Any = max_size snake_case_ : Dict = short_edge_length def __call__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : str = [] for img in imgs: snake_case_, snake_case_ : Dict = img.shape[:2] # later: provide list and randomly choose index for resize snake_case_ : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img snake_case_ : Optional[int] = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__ ) if h < w: snake_case_, snake_case_ : Tuple = size, scale * w else: snake_case_, snake_case_ : Dict = scale * h, size if max(lowerCAmelCase__ , lowerCAmelCase__ ) > self.max_size: snake_case_ : Any = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : Tuple = newh * scale snake_case_ : Optional[Any] = neww * scale snake_case_ : Dict = int(neww + 0.5 ) snake_case_ : Union[str, Any] = int(newh + 0.5 ) if img.dtype == np.uinta: snake_case_ : str = Image.fromarray(lowerCAmelCase__ ) snake_case_ : int = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) snake_case_ : Union[str, Any] = np.asarray(lowerCAmelCase__ ) else: snake_case_ : int = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw snake_case_ : int = nn.functional.interpolate( lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__ ).squeeze(0 ) img_augs.append(lowerCAmelCase__ ) return img_augs class A_ : """simple docstring""" def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> 
Optional[Any]: '''simple docstring''' snake_case_ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) snake_case_ : List[Any] = cfg.INPUT.FORMAT snake_case_ : Union[str, Any] = cfg.SIZE_DIVISIBILITY snake_case_ : List[str] = cfg.PAD_VALUE snake_case_ : List[Any] = cfg.INPUT.MAX_SIZE_TEST snake_case_ : List[Any] = cfg.MODEL.DEVICE snake_case_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) snake_case_ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) snake_case_ : Dict = lambda lowerCAmelCase__ : (x - self.pixel_mean) / self.pixel_std def _A ( self :Any , lowerCAmelCase__ :Any ) -> int: '''simple docstring''' snake_case_ : Dict = tuple(max(lowerCAmelCase__ ) for s in zip(*[img.shape for img in images] ) ) snake_case_ : Any = [im.shape[-2:] for im in images] snake_case_ : Optional[int] = [ nn.functional.pad( lowerCAmelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ] return torch.stack(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ ) def __call__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict=False ) -> List[str]: '''simple docstring''' with torch.no_grad(): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Dict = [images] if single_image: assert len(lowerCAmelCase__ ) == 1 for i in range(len(lowerCAmelCase__ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase__ , images.pop(lowerCAmelCase__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge snake_case_ : List[Any] = torch.tensor([im.shape[:2] for im in images] ) snake_case_ : 
List[Any] = self.aug(lowerCAmelCase__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic snake_case_ : Any = [self.normalizer(lowerCAmelCase__ ) for x in images] # now pad them to do the following operations snake_case_, snake_case_ : int = self.pad(lowerCAmelCase__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad snake_case_ : Optional[Any] = torch.true_divide(lowerCAmelCase__ , lowerCAmelCase__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]: """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" assert torch.isfinite(__magic_name__ ).all(), "Box tensor contains infinite or NaN!" snake_case_, snake_case_ : Optional[int] = box_size tensor[:, 0].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 1].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 2].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 3].clamp_(min=0 ,max=__magic_name__ )
653
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = RobertaTokenizer a__ = RobertaTokenizerFast a__ = True a__ = {'''cls_token''': '''<s>'''} def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ : int = {"unk_token": "<unk>"} snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> 
Optional[int]: '''simple docstring''' snake_case_ : int = "lower newer" snake_case_ : Tuple = "lower newer" return input_text, output_text def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : Dict = "lower newer" snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokens + [tokenizer.unk_token] snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _A ( self :str ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" ) snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode( "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _A ( self :List[Any] ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = self.get_tokenizer() snake_case_ : Tuple = "Encode this sequence." 
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Testing spaces after special tokens snake_case_ : List[Any] = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : List[str] = "Encode <mask> sequence" snake_case_ : List[Any] = "Encode <mask>sequence" snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : int = encoded.index(lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' pass def _A ( self :int ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Any = "A, <mask> AllenNLP sentence." snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _A ( self :int ) -> Tuple: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): snake_case_ : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , 
trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ ) def _A ( self :List[str] ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ : Any = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , 
add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Tuple = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Any = 
self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
653
1
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ 
:Any=1_000 , ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Optional[int] = num_channels snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Union[str, Any] = text_seq_length snake_case_ : Dict = is_training snake_case_ : Optional[Any] = use_input_mask snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Dict = use_labels snake_case_ : List[str] = vocab_size snake_case_ : Optional[Any] = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : str = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : List[Any] = type_vocab_size snake_case_ : Union[str, Any] = type_sequence_label_size snake_case_ : List[Any] = initializer_range snake_case_ : Union[str, Any] = coordinate_size snake_case_ : int = shape_size snake_case_ : Tuple = num_labels snake_case_ : List[Any] = num_choices snake_case_ : List[str] = scope snake_case_ : Dict = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) snake_case_ : str = text_seq_length snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1 snake_case_ : str = self.text_seq_length + self.image_seq_length def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ : Optional[Any] = bbox[i, j, 3] snake_case_ : Any = bbox[i, j, 1] snake_case_ : Tuple = t if bbox[i, j, 2] < 
bbox[i, j, 0]: snake_case_ : str = bbox[i, j, 2] snake_case_ : Dict = bbox[i, j, 0] snake_case_ : Union[str, Any] = t snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Dict = None if self.use_input_mask: snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] ) snake_case_ : Any = None if self.use_token_type_ids: snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) snake_case_ : Union[str, Any] = None snake_case_ : str = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) snake_case_ : str = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # text + image snake_case_ : Tuple = model(lowerCAmelCase__ , 
pixel_values=lowerCAmelCase__ ) snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only snake_case_ : List[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]: '''simple docstring''' snake_case_ : str = self.num_labels snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str: '''simple docstring''' 
snake_case_ : Optional[int] = self.num_labels snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : Optional[Any] = config_and_inputs snake_case_ : Tuple = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = False a__ = False a__ = False a__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, 
LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) a__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]: '''simple docstring''' return True def _A ( self :List[Any] ) -> str: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModelTester(self ) snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any: '''simple docstring''' snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ ) if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Optional[Any] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in get_values(lowerCAmelCase__ ): snake_case_ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) snake_case_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , ) return 
inputs_dict def _A ( self :Any ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :int ) -> int: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ : int = type self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :int ) -> str: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ ) @slow def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class A_ (unittest.TestCase ): """simple docstring""" @cached_property def _A ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None @slow def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = 
LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = self.default_image_processor snake_case_ : Optional[int] = prepare_img() snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ ) snake_case_ : List[str] = torch.tensor([[1, 2]] ) snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass snake_case_ : Any = model( input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , ) # verify the logits snake_case_ : Optional[Any] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
653
"""Find the first partition value whose proportion of "perfect" partitions
drops below a given threshold (Project Euler style problem).

Fixes: the two functions were defined under a mangled name (`__UpperCAmelCase`)
while being called as `check_partition_perfect` / `solution`, which raised
NameError at runtime; the real names are restored.
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if ``positive_integer`` is a "perfect" partition value.

    A value n is perfect exactly when log2(sqrt(4n + 1) / 2 + 1 / 2)
    is itself an integer.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest partition value at which the proportion of
    perfect partitions falls strictly below ``max_proportion``.

    Iterates odd-generated candidates (integer**2 - 1) / 4 and tracks the
    ratio of perfect partitions to total partitions.
    """
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
653
1
"""Champernowne's constant digit product (Project Euler problem 40).

Fixes: the function was defined under a mangled name while the main guard
called ``solution()`` (NameError); the return annotation said ``str`` but the
function returns an int; the digit string was built from 10**6 whole numbers
(~5.9M digits) when only 10**6 digits are needed — the loop now stops as soon
as enough digits have been accumulated, which leaves the result unchanged.
"""


def solution() -> int:
    """Return d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000 of the
    fractional part of Champernowne's constant 0.123456789101112...
    """
    digits: list[str] = []
    length = 0
    i = 1
    # accumulate consecutive integers until we have at least 10**6 digits
    while length < 10**6:
        s = str(i)
        digits.append(s)
        length += len(s)
        i += 1
    constant = "".join(digits)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
653
"""Convert timm ResNet checkpoints into the Hugging Face Transformers format.

Fixes: every definition had been renamed to a mangled placeholder while the
call sites kept the real names (``Tracker``, ``ModuleTransfer``,
``convert_weight_and_push``, ``ImageNetPreTrainedConfig``, ...), so the script
raised NameError immediately; the real bindings are restored. Additionally,
``convert_weights_and_push`` left ``config`` unbound on the single-model path
before ``return config, expected_shape`` — it is now bound in both branches.
"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules executed during a forward pass of ``module``."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # keep leaves only (modules with no submodules), plus convs / batchnorms
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # detach every hook once the trace is complete
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # discard modules that carry no parameters/buffers
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies the weights of ``src`` into ``dest`` by matching traced operations."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Run ``x`` through both modules and copy weights operation by operation.

        Raises if the two traces do not contain the same number of
        parametrized operations.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert the timm checkpoint ``name`` into ``config``'s architecture and
    optionally push model + image processor to the Hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or all) supported ResNet checkpoints.

    Returns the last converted config and the expected logits shape.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        # bind config so the final return works on the single-model path too
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
653
1
"""RWKV model configuration.

Fixes: every ``__init__`` parameter had been renamed to the same placeholder
(``lowerCAmelCase__``), which is a SyntaxError (duplicate argument), the base
class ``a_`` was undefined, and the logger and archive map shared one mangled
module-level name. The canonical identifiers are restored; defaults and
assignment order are unchanged.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration for an RWKV model.

    Instantiating a configuration with the defaults yields a configuration in
    the style of the RWKV-4 architecture. ``max_position_embeddings`` is
    aliased to ``context_length`` via ``attribute_map``.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # both fall back to sizes derived from hidden_size when not given
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
653
"""RoCBert model configuration.

Fixes: every ``__init__`` parameter had been renamed to the same placeholder
(``lowerCAmelCase__``), which is a SyntaxError (duplicate argument), and the
base class ``a_`` was undefined. The canonical identifiers are restored;
defaults and assignment order are unchanged.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Besides the usual BERT-style options it carries the pronunciation and
    shape embedding settings specific to RoCBert.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
653
1
"""Accelerate metrics test suite.

Fixes: ``setUp`` computed values but never bound the names the other tests
read (``mod_file``, ``self.test_file_path``, ``self.test_metrics`` — NameError
/ AttributeError at runtime), and all four test methods shared the mangled
name ``_A`` so only the last one survived class creation. Distinct, canonical
names are restored; behavior of each method is unchanged.
"""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        # locate the external test_metrics.py script shipped with accelerate
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
653
"""Largest square of 1s in a binary matrix — four approaches.

Fixes: the four top-level functions were all defined under one mangled name
(``__UpperCAmelCase``) while the main guard called
``largest_square_area_in_matrix_bottom_up`` (NameError); the real names are
restored. Additionally, the space-optimized variant did
``next_row = current_row``, aliasing the two lists — on the next row the
``diagonal`` read then saw the freshly written value instead of the previous
row's, producing wrong answers (e.g. 2 instead of 1 for [[1, 1], [1, 0]]);
it now copies the row.
"""


def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore right/diagonal/down from every cell.

    >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursion with memoization on a (row, col) dp table.

    >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a full (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of the table.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, do NOT alias: next_row must hold the finished row's values
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
653
1
"""Simple keyword cipher over A-Z (spaces pass through unchanged).

Fixes: the four functions were defined under one mangled name while ``main``
called ``generate_key`` / ``cipher_text`` / ``original_text`` (NameError), and
both lookup dictionaries were bound to the same mangled module-level name so
the letter->index table was lost. Real names are restored; logic unchanged.
"""
from string import ascii_uppercase

# letter -> index (A=0 .. Z=25) and index -> letter lookup tables
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cyclically extend ``key`` until it is exactly as long as ``message``."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message`` with the extended key; spaces are copied verbatim
    and do not consume a key character."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Invert :func:`cipher_text`, recovering the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
653
"""Helpers to fetch artifacts of the last scheduled (daily) CI workflow run.

Fixes: all four functions were defined under one mangled name while calling
each other by their real names (``get_daily_ci_runs``,
``get_last_daily_ci_runs``, ``get_last_daily_ci_artifacts`` — NameError); the
real names and intermediate variable bindings are restored. Logic unchanged.
"""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the last ``num_runs`` scheduled workflow runs on ``main``."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) matches get_artifacts_links' keyword
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the named artifacts; return {artifact: {file: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
653
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) __lowerCamelCase : Tuple = logging.getLogger(__name__) @dataclass class A_ : """simple docstring""" a__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a__ = field( default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a__ = field( default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a__ = field( default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) a__ = field(default=a_ , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) a__ = field(default=a_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class A_ : """simple docstring""" a__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) a__ = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) a__ = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) a__ = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) a__ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) a__ = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} ) a__ = field(default=a_ , metadata={'''help''': '''Source language id for translation.'''} ) a__ = field(default=a_ , metadata={'''help''': '''Target language id for translation.'''} ) a__ = field(default=a_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) a__ = field( default=a_ , metadata={'''help''': '''If only pad tokens should be ignored. 
This assumes that `config.pad_token_id` is defined.'''} , ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(__magic_name__ ,os.path.join(__magic_name__ ,F'''{split}_results.json''' ) ) def __UpperCAmelCase ( )-> Optional[Any]: """simple docstring""" snake_case_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_, snake_case_, snake_case_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_, snake_case_, snake_case_ : Optional[int] = parser.parse_args_into_dataclasses() check_output_dir(__magic_name__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" ,__magic_name__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # 
download model & vocab. snake_case_ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) snake_case_ : Any = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(__magic_name__ ,__magic_name__ ,__magic_name__ ): assert hasattr(__magic_name__ ,__magic_name__ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__magic_name__ ,__magic_name__ ,getattr(__magic_name__ ,__magic_name__ ) ) snake_case_ : int = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path ,from_tf=".ckpt" in model_args.model_name_or_path ,config=__magic_name__ ,cache_dir=model_args.cache_dir ,) # use task specific params use_task_specific_params(__magic_name__ ,data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__magic_name__ ,(MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__magic_name__ ,__magic_name__ ): snake_case_ : Union[str, Any] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : str = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__magic_name__ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : Optional[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __magic_name__ ,type_path="train" ,data_dir=data_args.data_dir ,n_obs=data_args.n_train 
,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or "" ,) if training_args.do_train else None ) snake_case_ : List[Any] = ( dataset_class( __magic_name__ ,type_path="val" ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or "" ,) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[str] = ( dataset_class( __magic_name__ ,type_path="test" ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or "" ,) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task ,__magic_name__ ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__magic_name__ ,args=__magic_name__ ,data_args=__magic_name__ ,train_dataset=__magic_name__ ,eval_dataset=__magic_name__ ,data_collator=SeqaSeqDataCollator( __magic_name__ ,__magic_name__ ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=__magic_name__ ,tokenizer=__magic_name__ ,) snake_case_ : int = {} # Training if training_args.do_train: logger.info("*** Train ***" ) snake_case_ : List[str] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Any = train_result.metrics snake_case_ : Tuple = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" ,__magic_name__ ,training_args.output_dir ) all_metrics.update(__magic_name__ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model 
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) snake_case_ : Any = trainer.evaluate(metric_key_prefix="val" ) snake_case_ : Dict = data_args.n_val snake_case_ : int = round(metrics["val_loss"] ,4 ) if trainer.is_world_process_zero(): handle_metrics("val" ,__magic_name__ ,training_args.output_dir ) all_metrics.update(__magic_name__ ) if training_args.do_predict: logger.info("*** Predict ***" ) snake_case_ : Optional[Any] = trainer.predict(test_dataset=__magic_name__ ,metric_key_prefix="test" ) snake_case_ : Optional[int] = test_output.metrics snake_case_ : Union[str, Any] = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : Optional[Any] = round(metrics["test_loss"] ,4 ) handle_metrics("test" ,__magic_name__ ,training_args.output_dir ) all_metrics.update(__magic_name__ ) if training_args.predict_with_generate: snake_case_ : Dict = tokenizer.batch_decode( test_output.predictions ,skip_special_tokens=__magic_name__ ,clean_up_tokenization_spaces=__magic_name__ ) snake_case_ : Optional[int] = lmap(str.strip ,__magic_name__ ) write_txt_file(__magic_name__ ,os.path.join(training_args.output_dir ,"test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(__magic_name__ ,os.path.join(training_args.output_dir ,"all_results.json" ) ) return all_metrics def __UpperCAmelCase ( __magic_name__ )-> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
653
"""Vigenère-style cipher: encrypt/decrypt upper-case messages with a repeating key."""

from string import ascii_uppercase

# Letter <-> index lookup tables for A..Z.
CHAR_TO_INT = {char: i for i, char in enumerate(ascii_uppercase)}
INT_TO_CHAR = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat ``key`` until it is as long as ``message``.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0  # wrap the index once it reaches the message length
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message`` with the expanded key; spaces pass through unchanged.

    >>> cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE")
    'BDC PAYUWL JPAIYI'
    """
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            # Spaces do not consume key characters: i only advances on letters.
            x = (CHAR_TO_INT[letter] - CHAR_TO_INT[key_new[i]]) % 26
            i += 1
            encrypted += INT_TO_CHAR[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt text produced by :func:`cipher_text` with the same expanded key.

    >>> original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE")
    'THE GERMAN ATTACK'
    """
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            # Inverse of encryption: add the key index back (+26 keeps it positive).
            x = (CHAR_TO_INT[letter] + CHAR_TO_INT[key_new[i]] + 26) % 26
            i += 1
            or_txt += INT_TO_CHAR[x]
    return or_txt


def main() -> None:
    """Demo: encrypt and decrypt a sample message."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
653
1
"""Tests for the ALBERT tokenizers (slow/SentencePiece and fast/Rust)."""

import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # Expected encodings for albert-base-v2. The attention-mask/padding runs
        # are built programmatically — identical values to the original literals,
        # just less error-prone to maintain.
        expected_encoding = {
            "attention_mask": [[1] * 101, [1] * 31 + [0] * 70, [1] * 12 + [0] * 89],
            "input_ids": [
                [2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673,
                 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8,
                 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26,
                 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84,
                 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128,
                 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3],
                [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854,
                 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3] + [0] * 70,
                [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3] + [0] * 89,
            ],
            "token_type_ids": [[0] * 101, [0] * 101, [0] * 101],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
653
"""Convert a Hugging Face BERT PyTorch checkpoint to a TensorFlow 1.x checkpoint."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Write ``model``'s weights as a TF 1.x checkpoint under ``ckpt_dir``.

    Args:
        model: BertModel PyTorch instance to be converted.
        ckpt_dir: directory the TensorFlow checkpoint is saved into.
        model_name: used (with "-" -> "_") as the checkpoint file name.
    """
    # These weights are stored transposed in TF relative to PyTorch.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring, tensorflow substring) rename rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Map a PyTorch state-dict key to the equivalent TF variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session):
        # Create a zero-initialized TF variable matching the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point; ``raw_args`` defaults to ``sys.argv[1:]``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
653
1
"""Feature extractor class for CLIP (deprecated alias of CLIPImageProcessor)."""

import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    """Deprecated: kept only for backward compatibility; use ``CLIPImageProcessor``."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            # The incoming source passed the *args tuple here, which is not a
            # valid warning category and would raise TypeError at call time.
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
653
"""Hash table whose buckets are deques, resolving collisions by chaining."""

from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend ``data`` to the deque stored at ``key`` (creating it on first use)."""
        # The incoming source assigned the deque to a local, so appendleft()
        # would run on None for a fresh slot; the bucket must be stored back.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining chain capacity per slot, scaled by the charge factor.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Keep using this slot unless its chain is full AND no empty slot remains.
        # NOTE(review): the "empty slot" sentinel was mangled in the incoming
        # source; None matches how _set_value detects an unused slot — confirm
        # against the HashTable base class.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
653
1
"""Benchmark argument dataclass for PyTorch, with handling for deprecated flags."""

from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    # Old negative flags that are translated to their positive counterparts.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # e.g. "no_inference" -> "inference", with the boolean inverted.
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    # @dataclass does not overwrite the explicit __init__ above; these field
    # declarations still provide the class-level defaults it reads.
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve (device, n_gpu) once; cached for the object's lifetime."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
653
"""Open-addressing hash map with lazy deletion (tombstones) and automatic resizing."""

from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True)  # upstream also used slots=True; omitted for pre-3.10 compatibility
class _Item(Generic[KEY, VAL]):
    # One occupied bucket: a key/value pair.
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Tombstone marking a bucket whose item was deleted (falsy on purpose)."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map using linear probing; grows/shrinks with the load factor."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing: next bucket, wrapping around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place the pair in bucket ``ind``; True on insert or overwrite."""
        stored = self._buckets[ind]
        if not stored:
            # Empty bucket or tombstone: claim it.
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        # NOTE: must rebind the attributes — the incoming source assigned to
        # locals here, which made resizing a no-op and duplicated every item.
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        # Probe sequence for ``key``: at most one full sweep of the table.
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                # Tombstone instead of None keeps later probe chains intact.
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
653
1
"""VAN (Visual Attention Network) model configuration."""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    """
    Configuration for a VAN model. Instantiating with the defaults yields a
    configuration similar to Visual-Attention-Network/van-base.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        # List defaults are shared across calls but never mutated by the config.
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The incoming source assigned these values to locals, so the attributes
        # were never set; they must live on self for the model code to read them.
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
653
"""GPTBigCode model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPTBigCode models (a multi-query-attention GPT-2 variant)."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        # The incoming source assigned these values to locals, so the attributes
        # were never set; they must live on self for serialization and the model.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
653
1
"""Tests for the Stable Diffusion InstructPix2Pix pipeline.

Fast CPU tests (tiny dummy components) plus slow GPU integration tests against
the ``timbrooks/instruct-pix2pix`` checkpoint.

Fixes applied in review: every method had been renamed to ``_A`` (so later defs
shadowed earlier ones and ``self.get_dummy_components()`` etc. raised
AttributeError), the mixin bases were the undefined name ``a_`` although the
mixins are imported below, and dtype names were garbled (``np.uinta`` for
``np.uint8``, ``torch.floataa`` for ``torch.float16``).
"""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPixaPixPipeline,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class A_(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU) tests for StableDiffusionInstructPixaPixPipeline built from tiny dummy models."""

    # NOTE(review): the surrounding file reuses the class name ``A_`` for several
    # classes; the name is kept to preserve the file's top-level interface.
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny, randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs (prompt + 32x32 RGB image + generator) for the pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        # mps does not support device-specific generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # turn the single PIL image into a batch of two pixel tensors in [-? , ?] range
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded VAE latents as the image input must match passing the image itself."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests against the released timbrooks/instruct-pix2pix checkpoint."""

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Return deterministic call kwargs using a reference test image downloaded from the Hub."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """Verify the step callback fires each step and latents match reference values early on."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
653
"""Training utilities for the distillation example: git provenance logging,
single/multi-GPU initialization, and RNG seeding.

Fixes applied in review: all three functions were defined under the same name
``__UpperCAmelCase`` (the first two were unreachable shadows), and
``init_gpu_params`` referenced an undefined ``PREFIX`` because its assignment —
like every ``params.*`` attribute write — had been mangled into a dead local
assignment. The attribute writes are restored from the names the function
itself reads back (``params.local_rank``, ``params.is_master``, ...).
"""
import json
import logging
import os
import socket

import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path):
    """Dump the current git commit/branch info to ``<folder_path>/git_log.json``.

    Args:
        folder_path: directory in which to write ``git_log.json``.
    """
    # imported lazily so the module is usable without GitPython installed
    import git

    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup, mutating ``params`` in place.

    Expects ``params.n_gpu`` and ``params.local_rank`` to be set by the caller;
    in the distributed case it reads WORLD_SIZE / N_GPU_NODE / RANK / N_NODES /
    NODE_RANK from the environment (as set by the launcher).
    """
    if params.n_gpu <= 0:
        # CPU-only run: degenerate single-process setup
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Seed numpy, torch, and (if GPUs are used) all CUDA devices from ``args.seed``."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
653
1
"""Tower of Hanoi solver that prints each disk move.

Fix applied in review: all three functions were defined under the same name
``__UpperCAmelCase``, so the recursive calls to ``move_tower`` / ``move_disk``
and the ``main()`` invocation raised NameError; the names used at the call
sites are restored.
"""


def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``.

    Uses ``with_pole`` as the auxiliary pole; prints one line per disk move
    (2**height - 1 moves in total).
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print("moving disk from", fp, "to", tp)


def main():
    """Prompt for the tower height and solve it on poles A -> B with helper C."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
653
"""Tests for the YOLOS image processor.

Fixes applied in review: the tester class was renamed to ``A_`` although
``setUp`` instantiates it as ``YolosImageProcessingTester(self)`` (NameError);
the test class base was the undefined name ``a_`` although
``ImageProcessingSavingTestMixin`` is imported above; and every method had been
renamed ``_A`` (later defs shadowed earlier ones, and call sites such as
``self.get_expected_values`` / ``prepare_image_processor_dict`` failed).
"""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and computes expected output shapes for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a YolosImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) a processed image — or batch — will have after
        shortest-edge resizing; for batches, the max over per-image expectations."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class A_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Functional tests for YolosImageProcessor (PIL / numpy / torch inputs + COCO annotations)."""

    # NOTE(review): the surrounding file reuses the class name ``A_``; kept to
    # preserve the file's top-level interface.
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # one fully-configured processor vs. one that only pads
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
653
1
"""Tokenization class for BlenderbotSmall (byte-pair-encoding with word-final `</w>` markers)."""

import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# On-disk artifact names this tokenizer reads (``from_pretrained``) and writes (``save_vocabulary``).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs found in ``word``.

    Args:
        word: a sequence of symbols (e.g. a tuple of variable-length strings).

    Returns:
        A ``set`` of ``(prev_symbol, symbol)`` tuples for every adjacent pair.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the BlenderbotSmall checkpoints.

    Lowercases input, applies light punctuation normalization, then merges
    characters according to the learned BPE ranks. Sub-word continuations are
    marked with a trailing ``@@``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse mapping for id -> token lookups.
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is the "#version" header; last entry is an empty trailing line.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank == merged earlier during BPE.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE to a single whitespace-delimited token.

        Returns the space-joined sub-words; continuation pieces carry a
        trailing ``@@`` marker. Results are memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        # Split punctuation off from words and collapse the resulting runs of spaces.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the last character as word-final so merges distinguish word ends.
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily pick the highest-priority (lowest-rank) mergeable pair.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # Drop the trailing "</w>" marker from the last piece.
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split ``text`` on whitespace and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a (lowercased) token string to its vocabulary id, falling back to unk."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map a vocabulary id back to its token string, falling back to unk."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join BPE pieces into plain text by removing the ``@@ `` continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``.

        Returns the two file paths, or ``None`` if the directory is invalid.
        """
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Emit merges in rank order; warn if the stored ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
653
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" if not isinstance(__magic_name__ ,__magic_name__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) snake_case_ : Dict = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__magic_name__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
653
1
"""Lazy import structure for the GIT (GenerativeImage2Text) model package."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Modules that are always importable, regardless of installed backends.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

# Modeling code requires torch; register it only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader that imports
    # submodules only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
"""Accelerate example: train BERT on GLUE MRPC with automatic OOM recovery.

Builds on the base ``nlp_example.py`` script; the new feature demonstrated
here is ``find_executable_batch_size``, which retries the training loop with
a halved batch size whenever an out-of-memory error occurs.
"""

import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

# New Code #
from accelerate.utils import find_executable_batch_size


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC.

    Args:
        accelerator: the ``Accelerator`` driving this run (used for process
            coordination and to pick padding strategy per device/precision).
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first so the cache is written exactly once.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename 'label' to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only: swap in lightweight mocked dataloaders.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full training/evaluation loop described by ``config`` and CLI ``args``."""
    # For testing only: shorten the run.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # Inner training loop taking the batch size as its only parameter; the
    # decorator retries it with a smaller batch size on CUDA OOM.
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # Reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls new weight initialization).
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation, otherwise training will not work on TPU.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything. There is no specific order to remember; we just unpack
        # the objects in the same order we gave them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

    # New Code #
    # Call the decorated loop with no arguments; the decorator supplies the batch size.
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
653
1
"""Wav2Vec2 pre-training script: contrastive pre-training with Gumbel-softmax
quantizer temperature annealing, driven by the HF Trainer."""

import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: "ModelArguments", training_args: TrainingArguments):
    """Set up stdout logging; DEBUG if verbose, INFO on the main process, WARNING elsewhere."""
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pre-training."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Pad raw speech inputs into a batch and sample the time-step mask indices
    needed by Wav2Vec2's contrastive pre-training objective."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Length of the feature-encoder output for the padded input length.
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    """Trainer subclass that anneals the Gumbel-softmax temperature after each update."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """One optimization step: forward, loss scaling/reduction, backward, temperature decay.

        Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    """Parse arguments, load and preprocess audio, then run Wav2Vec2 pre-training."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture;
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
653
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool backed by an NLI model.

    Each candidate label is scored by pairing the input text with the
    hypothesis "This example is {label}" and reading the model's entailment
    logit; the label with the highest entailment score wins.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load model/tokenizer and locate the 'entailment' class index in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs — one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f'''This example is {label}''' for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Pick the label whose pair scored the highest entailment logit."""
        logits = outputs.logits
        # NOTE(review): index 2 is assumed to be the entailment class for this
        # checkpoint; `self.entailment_id` computed in `setup` suggests using it
        # here instead — confirm against the upstream implementation.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
653
1
"""Fast (Rust-backed) tokenizer class for LED, derived from the BART tokenizer."""

import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast LED tokenizer (byte-level BPE).

    Mirrors ``LEDTokenizer`` and additionally pads the LED-specific
    ``global_attention_mask`` in ``_pad``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Sync the backend pre-tokenizer's `add_prefix_space` with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token; logs an error and returns ``None`` if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, i.e. include the space before them.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>`` from id sequences."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad like the base class, then pad ``global_attention_mask`` to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
653
"""Lazy import structure for the ViT model family.

BUG FIX: every piece of the import structure was bound to a throwaway name
(`__lowerCamelCase`, repeatedly overwritten) while the `_LazyModule(...)` call at
the bottom referenced `_import_structure`, which was never defined -- importing
this module raised NameError, and the lazy module was never installed in
`sys.modules`. This restores the canonical HuggingFace lazy-init pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Base structure: the config submodule is always importable.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

# Each optional backend contributes its submodules only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors `_import_structure`.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
1
'''TensorFlow benchmark arguments: a dataclass extending BenchmarkArguments with TF-specific
fields (TPU name, device index, eager mode, XLA) and lazily-built tf.distribute strategies.

NOTE(review): assignment targets were mangled to `snake_case_`/`__lowerCamelCase`; reads of
`logger`, `positive_arg`, `tpu`, `strategy` etc. reference names that are never bound here --
confirm each target against upstream `transformers.benchmark.benchmark_args_tf`.
'''
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments

if is_tf_available():
    import tensorflow as tf

__lowerCamelCase : int = logging.get_logger(__name__)


@dataclass
class A_ (a_ ):
    '''TensorFlow-specific benchmark arguments (base class `a_` is BenchmarkArguments).'''

    # Legacy negated flags accepted in kwargs and translated in __init__.
    a__ = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]

    def __init__( self :Tuple , **lowerCAmelCase__ :Optional[int] ) -> List[str]:
        '''Translate each deprecated `no_*` kwarg into its inverted positive counterpart
        (with a deprecation warning), pop the TF-only fields, then delegate to the base init.'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # strip the "no_" prefix; the positive flag is the logical negation
                snake_case_ : int = deprecated_arg[3:]
                snake_case_ : int = not kwargs.pop(lowerCAmelCase__ )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        snake_case_ : Tuple = kwargs.pop("tpu_name" , self.tpu_name )
        snake_case_ : Optional[Any] = kwargs.pop("device_idx" , self.device_idx )
        snake_case_ : Any = kwargs.pop("eager_mode" , self.eager_mode )
        snake_case_ : str = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**lowerCAmelCase__ )

    a__ = field(
        default=a_ , metadata={'''help''': '''Name of TPU'''} , )
    a__ = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    a__ = field(default=a_ , metadata={'''help''': '''Benchmark models in eager model.'''} )
    a__ = field(
        default=a_ , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. 
Note that `eager_model` has to be set to `False`.'''
        } , )

    @cached_property
    def _A ( self :List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''Resolve a TPU cluster (by name if given); returns None when no TPU is reachable.'''
        requires_backends(self , ["tf"] )
        snake_case_ : Tuple = None
        if self.tpu:
            try:
                if self.tpu_name:
                    snake_case_ : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    snake_case_ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                # no TPU available -- fall through with None
                snake_case_ : Optional[Any] = None
        return tpu

    @cached_property
    def _A ( self :List[Any] ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''Build the tf.distribute strategy: TPUStrategy if a TPU resolved, otherwise a
        OneDeviceStrategy pinned to a single GPU (by device_idx) or the CPU.'''
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            snake_case_ : Tuple = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                snake_case_ : Optional[int] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                snake_case_ : Optional[Any] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy

    @property
    def _A ( self :Optional[Any] ) -> bool:
        '''True when a TPU cluster resolver was successfully set up.'''
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None

    @property
    def _A ( self :List[Any] ) -> "tf.distribute.Strategy":
        '''The lazily-built distribution strategy.'''
        requires_backends(self , ["tf"] )
        return self._setup_strategy

    @property
    def _A ( self :Any ) -> Union[str, Any]:
        '''All physical GPU devices visible to TensorFlow.'''
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )

    @property
    def _A ( self :List[str] ) -> int:
        '''Number of usable GPUs (0 when CUDA benchmarking is disabled).'''
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def _A ( self :int ) -> bool:
        '''True when at least one GPU is usable.'''
        return self.n_gpu > 0
653
'''PyTorch LayoutLMv3 tests: a model tester that fabricates tiny configs/inputs, the common
ModelTesterMixin test class, and a slow integration test against microsoft/layoutlmv3-base.

NOTE(review): assignment targets were mangled to `snake_case_` and names collapsed to `A_` /
`_A` / `__UpperCAmelCase`; reads such as `bbox`, `config_and_inputs`, `LayoutLMvaModelTester`
and `prepare_img` reference names never bound here -- confirm against the upstream test file.
'''
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        LayoutLMvaForQuestionAnswering,
        LayoutLMvaForSequenceClassification,
        LayoutLMvaForTokenClassification,
        LayoutLMvaModel,
    )
    from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class A_ :
    '''Model tester: builds a miniature LayoutLMv3 config plus random text/bbox/image inputs.'''

    def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
        '''Store hyperparameters and derive the combined text+patch sequence length.'''
        snake_case_ : Optional[int] = parent
        snake_case_ : Union[str, Any] = batch_size
        snake_case_ : Optional[int] = num_channels
        snake_case_ : List[Any] = image_size
        snake_case_ : Optional[int] = patch_size
        snake_case_ : Union[str, Any] = text_seq_length
        snake_case_ : Dict = is_training
        snake_case_ : Optional[Any] = use_input_mask
        snake_case_ : Union[str, Any] = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : List[str] = vocab_size
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : List[str] = intermediate_size
        snake_case_ : str = hidden_act
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Optional[int] = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[Any] = type_vocab_size
        snake_case_ : Union[str, Any] = type_sequence_label_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Union[str, Any] = coordinate_size
        snake_case_ : int = shape_size
        snake_case_ : Tuple = num_labels
        snake_case_ : List[Any] = num_choices
        snake_case_ : List[str] = scope
        snake_case_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        snake_case_ : str = text_seq_length
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
        snake_case_ : str = self.text_seq_length + self.image_seq_length

    def _A ( self :Union[str, Any] ) -> Tuple:
        '''Create a config and random inputs (ids, legal bboxes, pixel values, masks, labels).'''
        snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # swap coordinates so x0 <= x1 and y0 <= y1
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    snake_case_ : Optional[Any] = bbox[i, j, 3]
                    snake_case_ : Any = bbox[i, j, 1]
                    snake_case_ : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    snake_case_ : str = bbox[i, j, 2]
                    snake_case_ : Dict = bbox[i, j, 0]
                    snake_case_ : Union[str, Any] = t
        snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Dict = None
        if self.use_input_mask:
            snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
        snake_case_ : Any = None
        if self.use_token_type_ids:
            snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        snake_case_ : Union[str, Any] = None
        snake_case_ : str = None
        if self.use_labels:
            snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        snake_case_ : str = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
        '''Forward the base model with text+image, text-only and image-only inputs; check shapes.'''
        snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        # text + image
        snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        snake_case_ : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
        '''Sequence-classification head: logits must be (batch, num_labels).'''
        snake_case_ : str = self.num_labels
        snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
        '''Token-classification head: logits must be (batch, text_seq_length, num_labels).'''
        snake_case_ : Optional[int] = self.num_labels
        snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
        '''QA head: start/end logits must be (batch, seq_length).'''
        snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _A ( self :int ) -> Union[str, Any]:
        '''Repack prepare_config_and_inputs() output into (config, inputs_dict) for the mixin.'''
        snake_case_ : Dict = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) : Optional[Any] = config_and_inputs
        snake_case_ : Tuple = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    '''Common-test harness (ModelTesterMixin / PipelineTesterMixin via the `a_` bases).'''

    a__ = False
    a__ = False
    a__ = False
    a__ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
        '''Pipeline test gate -- always run (returns True unconditionally).'''
        return True

    def _A ( self :List[Any] ) -> str:
        '''setUp: build the model tester and a ConfigTester.'''
        # NOTE(review): `LayoutLMvaModelTester` is not defined in this mangled file
        # (the tester class above was renamed `A_`) -- confirm against upstream.
        snake_case_ : Tuple = LayoutLMvaModelTester(self )
        snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )

    def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
        '''Adapt the shared inputs_dict per model class (expand for multiple-choice,
        synthesize dummy labels per head when return_labels is requested).'''
        snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
        if model_class in get_values(lowerCAmelCase__ ):
            snake_case_ : Optional[Any] = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowerCAmelCase__ ):
                snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in get_values(lowerCAmelCase__ ):
                snake_case_ : List[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
                snake_case_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                snake_case_ : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                snake_case_ : List[str] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
        return inputs_dict

    def _A ( self :Any ) -> Any:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    def _A ( self :int ) -> int:
        '''Base model forward/shape test.'''
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :Any ) -> Dict:
        '''Base model test across all position-embedding variants.'''
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case_ : int = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :int ) -> str:
        '''Sequence-classification head test.'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )

    def _A ( self :List[Any] ) -> Optional[Any]:
        '''Token-classification head test.'''
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )

    def _A ( self :int ) -> Union[str, Any]:
        '''Question-answering head test.'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )

    @slow
    def _A ( self :Tuple ) -> List[Any]:
        '''Smoke-test loading the first published checkpoint from the hub.'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )


def __UpperCAmelCase ( )-> List[str]:
    '''Load the shared COCO test fixture image.'''
    snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
class A_ (unittest.TestCase ):
    '''Slow integration test against the real microsoft/layoutlmv3-base checkpoint.'''

    @cached_property
    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        '''Image processor fixture (OCR disabled), or None when vision deps are missing.'''
        return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None

    @slow
    def _A ( self :Union[str, Any] ) -> Union[str, Any]:
        '''Forward a fixture image + tiny text through the pretrained model and compare
        the first hidden-state values against golden numbers.'''
        snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = self.default_image_processor
        snake_case_ : Optional[int] = prepare_img()
        snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
        snake_case_ : List[str] = torch.tensor([[1, 2]] )
        snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        snake_case_ : Any = model(
            input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
        # verify the logits
        snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
        snake_case_ : str = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
653
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : List[str] = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
'''Tests for `datasets.parallel` with the joblib-spark backend.

NOTE(review): the obfuscation collapsed three distinct functions onto the single name
`__UpperCAmelCase` (later defs shadow earlier ones, and pytest will not collect any of
them since none start with `test_`), replaced call arguments with `__magic_name__`
(unbound), and destroyed assignment targets (`snake_case_`). Confirm each name against
the upstream `datasets` test file (`add_one`, `test_parallel_backend_input_dataset`,
`test_parallel_backend_map_nested`, `ValueError` in `pytest.raises`).
'''
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def __UpperCAmelCase ( __magic_name__ )-> int:  # picklable for multiprocessing
    '''Top-level increment helper so it stays picklable for the multiprocessing backend.'''
    # NOTE(review): reads `i`, but the parameter was mangled to `__magic_name__`.
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
    '''Spark backend is selected inside the context manager; unsupported names raise.'''
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    snake_case_ : str = [1, 2, 3]
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
    '''map_nested over list/dict/nested-dict inputs must +1 every leaf under the spark backend.'''
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
    snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
    snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
    snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
    snake_case_ : Tuple = [2, 3]
    snake_case_ : str = {"a": 2, "b": 3}
    snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
    snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
    snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
653
1
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> list: """simple docstring""" snake_case_ : Optional[Any] = len(__magic_name__ ) snake_case_ : Tuple = [[0] * n for i in range(__magic_name__ )] for i in range(__magic_name__ ): snake_case_ : Tuple = y_points[i] for i in range(2 ,__magic_name__ ): for j in range(__magic_name__ ,__magic_name__ ): snake_case_ : str = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
653
'''ESM model configuration: EsmConfig plus the ESMFold sub-configs and default vocabulary.

NOTE(review): assignment targets were mangled (`snake_case_`, `__lowerCamelCase`), so
attributes like `self.vocab_size`, `self.esmfold_config`, `self.trunk` are never actually
bound here; the `_A` methods on the dataclasses were presumably `__post_init__`/`to_dict`
upstream (renamed, so dataclass post-init validation never runs). Confirm every target
against upstream `transformers.models.esm.configuration_esm`.
'''
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging

__lowerCamelCase : Dict = logging.get_logger(__name__)

# TODO Update this
__lowerCamelCase : int = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class A_ (a_ ):
    '''ESM configuration (base class `a_` is PretrainedConfig); model_type "esm".'''

    a__ = '''esm'''

    def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
        '''Store transformer hyperparameters; for folding models, normalize `esmfold_config`
        (dict -> EsmFoldConfig, None -> defaults) and fall back to the ESM-2 vocab list.'''
        super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
        snake_case_ : str = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Tuple = attention_probs_dropout_prob
        snake_case_ : List[Any] = max_position_embeddings
        snake_case_ : str = initializer_range
        snake_case_ : List[Any] = layer_norm_eps
        snake_case_ : str = position_embedding_type
        snake_case_ : Optional[int] = use_cache
        snake_case_ : str = emb_layer_norm_before
        snake_case_ : List[Any] = token_dropout
        snake_case_ : str = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                snake_case_ : Optional[Any] = EsmFoldConfig()
            elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
            snake_case_ : Optional[Any] = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                snake_case_ : List[str] = get_default_vocab_list()
            else:
                snake_case_ : List[str] = vocab_list
        else:
            snake_case_ : List[Any] = None
            snake_case_ : int = None
        # The HF port never supports use_esm_attn_map, regardless of how config was built.
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def _A ( self :Optional[int] ) -> List[Any]:
        '''Serialize to dict, expanding the nested EsmFoldConfig when present.'''
        snake_case_ : Any = super().to_dict()
        if isinstance(self.esmfold_config , lowerCAmelCase__ ):
            snake_case_ : Optional[int] = self.esmfold_config.to_dict()
        return output


@dataclass
class A_ :
    '''ESMFold-specific options (EsmFoldConfig upstream).'''

    a__ = None
    a__ = True
    a__ = False
    a__ = False
    a__ = False
    a__ = 0
    a__ = True
    a__ = False
    a__ = 128
    a__ = None

    def _A ( self :Dict ) -> int:
        '''Normalize `trunk`: None -> default TrunkConfig, dict -> TrunkConfig(**dict).'''
        if self.trunk is None:
            snake_case_ : Dict = TrunkConfig()
        elif isinstance(self.trunk , lowerCAmelCase__ ):
            snake_case_ : int = TrunkConfig(**self.trunk )

    def _A ( self :Optional[int] ) -> Union[str, Any]:
        '''Serialize to dict, expanding the nested trunk config.'''
        snake_case_ : Tuple = asdict(self )
        snake_case_ : Optional[int] = self.trunk.to_dict()
        return output


@dataclass
class A_ :
    '''Folding-trunk options (TrunkConfig upstream).'''

    a__ = 48
    a__ = 1024
    a__ = 128
    a__ = 32
    a__ = 32
    a__ = 32
    a__ = 0
    a__ = 0
    a__ = False
    a__ = 4
    a__ = 128
    a__ = None

    def _A ( self :List[Any] ) -> Union[str, Any]:
        '''Normalize the structure module and validate dimensions.'''
        if self.structure_module is None:
            snake_case_ : Optional[int] = StructureModuleConfig()
        elif isinstance(self.structure_module , lowerCAmelCase__ ):
            snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): `x % x != 0` is always False -- these two checks can never fire;
        # upstream later changed them to compare against the head widths. Confirm intent.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
        snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def _A ( self :Tuple ) -> List[str]:
        '''Serialize to dict, expanding the nested structure-module config.'''
        snake_case_ : int = asdict(self )
        snake_case_ : Dict = self.structure_module.to_dict()
        return output


@dataclass
class A_ :
    '''Structure-module options (StructureModuleConfig upstream); plain asdict serialization.'''

    a__ = 384
    a__ = 128
    a__ = 16
    a__ = 128
    a__ = 12
    a__ = 4
    a__ = 8
    a__ = 0.1
    a__ = 8
    a__ = 1
    a__ = 2
    a__ = 7
    a__ = 10
    a__ = 1E-8
    a__ = 1E5

    def _A ( self :Dict ) -> Dict:
        '''Serialize this dataclass to a plain dict.'''
        return asdict(self )


def __UpperCAmelCase ( )-> int:
    '''Return the default ESM-2 vocabulary (special tokens + amino-acid alphabet).'''
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
653
1
'''simple docstring'''
# Lazy-import scaffolding for the XLM-RoBERTa model family: the dict maps
# submodule name -> exported names; each guarded try/except only registers a
# backend's exports when that backend (sentencepiece / tokenizers / torch /
# tf / flax) is installed.
# NOTE(review): obfuscation renamed every module-level binding to
# `__lowerCamelCase`, so each assignment overwrites the previous one and the
# final `_LazyModule(...)` call references `_import_structure`, which is never
# defined in this chunk — restore the original distinct names before use.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-available exports: configuration classes only.
__lowerCamelCase : Optional[int] = {
    '''configuration_xlm_roberta''': [
        '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaConfig''',
        '''XLMRobertaOnnxConfig''',
    ],
}

# Slow (sentencepiece-based) tokenizer.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Union[str, Any] = ['''XLMRobertaTokenizer''']

# Fast (tokenizers-based) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : str = ['''XLMRobertaTokenizerFast''']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = [
        '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaForCausalLM''',
        '''XLMRobertaForMaskedLM''',
        '''XLMRobertaForMultipleChoice''',
        '''XLMRobertaForQuestionAnswering''',
        '''XLMRobertaForSequenceClassification''',
        '''XLMRobertaForTokenClassification''',
        '''XLMRobertaModel''',
        '''XLMRobertaPreTrainedModel''',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = [
        '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMRobertaForCausalLM''',
        '''TFXLMRobertaForMaskedLM''',
        '''TFXLMRobertaForMultipleChoice''',
        '''TFXLMRobertaForQuestionAnswering''',
        '''TFXLMRobertaForSequenceClassification''',
        '''TFXLMRobertaForTokenClassification''',
        '''TFXLMRobertaModel''',
        '''TFXLMRobertaPreTrainedModel''',
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = [
        '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxXLMRobertaForMaskedLM''',
        '''FlaxXLMRobertaForCausalLM''',
        '''FlaxXLMRobertaForMultipleChoice''',
        '''FlaxXLMRobertaForQuestionAnswering''',
        '''FlaxXLMRobertaForSequenceClassification''',
        '''FlaxXLMRobertaForTokenClassification''',
        '''FlaxXLMRobertaModel''',
        '''FlaxXLMRobertaPreTrainedModel''',
    ]

# Under TYPE_CHECKING, import everything eagerly so static analyzers see the
# real symbols; at runtime, install a _LazyModule instead.
if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
'''simple docstring'''
# Lazy-import scaffolding for Longformer: configuration and slow tokenizer are
# always exported; fast tokenizer / torch / tf exports are registered only
# when the corresponding backend is installed.
# NOTE(review): obfuscation renamed every module-level binding to
# `__lowerCamelCase` (each assignment overwrites the last) and the final
# `_LazyModule(...)` call references `_import_structure`, which is never
# defined in this chunk — restore the original distinct names before use.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

__lowerCamelCase : Any = {
    '''configuration_longformer''': [
        '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''LongformerConfig''',
        '''LongformerOnnxConfig''',
    ],
    '''tokenization_longformer''': ['''LongformerTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = ['''LongformerTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = [
        '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongformerForMaskedLM''',
        '''LongformerForMultipleChoice''',
        '''LongformerForQuestionAnswering''',
        '''LongformerForSequenceClassification''',
        '''LongformerForTokenClassification''',
        '''LongformerModel''',
        '''LongformerPreTrainedModel''',
        '''LongformerSelfAttention''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = [
        '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLongformerForMaskedLM''',
        '''TFLongformerForMultipleChoice''',
        '''TFLongformerForQuestionAnswering''',
        '''TFLongformerForSequenceClassification''',
        '''TFLongformerForTokenClassification''',
        '''TFLongformerModel''',
        '''TFLongformerPreTrainedModel''',
        '''TFLongformerSelfAttention''',
    ]

# Eager imports for static analysis only; at runtime a _LazyModule is used.
if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
1
'''simple docstring'''
# Test suite for GPTNeoXJapanese (model tester + ModelTesterMixin test case).
# NOTE(review): machine-obfuscated dump — every class is named `A_`, every
# method `_A`, every local `snake_case_` and every parameter
# `lowerCAmelCase__`. This collapses distinct methods/parameters onto one name
# (duplicate parameter names are a SyntaxError; later `_A` defs shadow earlier
# ones) while bodies still reference the *original* names (`parent`,
# `batch_size`, `self.prepare_config_and_inputs`, `GPTNeoXJapaneseModelTester`,
# ...). Formatting restored and comments added only; the original distinct
# names must be restored before this file can run.
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class A_:
    """simple docstring"""
    # Model tester: builds small random configs/inputs and runs shape checks.

    def __init__(
        self :Any,
        lowerCAmelCase__ :List[Any],
        lowerCAmelCase__ :Any=13,
        lowerCAmelCase__ :Union[str, Any]=7,
        lowerCAmelCase__ :Optional[int]=True,
        lowerCAmelCase__ :int=True,
        lowerCAmelCase__ :int=True,
        lowerCAmelCase__ :Union[str, Any]=True,
        lowerCAmelCase__ :Dict=99,
        lowerCAmelCase__ :Union[str, Any]=32,
        lowerCAmelCase__ :int=5,
        lowerCAmelCase__ :Tuple=4,
        lowerCAmelCase__ :List[str]=4,
        lowerCAmelCase__ :Any="gelu",
        lowerCAmelCase__ :int=0.0,
        lowerCAmelCase__ :Union[str, Any]=0.1,
        lowerCAmelCase__ :Tuple=True,
        lowerCAmelCase__ :Optional[Any]=512,
        lowerCAmelCase__ :Dict=16,
        lowerCAmelCase__ :str=2,
        lowerCAmelCase__ :Optional[Any]=0.0_2,
        lowerCAmelCase__ :Tuple=3,
        lowerCAmelCase__ :Optional[Any]=4,
        lowerCAmelCase__ :Optional[int]=None,
    ) -> Tuple:
        '''simple docstring'''
        # RHS names below are the pre-obfuscation parameter names.
        snake_case_ : Optional[int] = parent
        snake_case_ : List[Any] = batch_size
        snake_case_ : int = seq_length
        snake_case_ : int = is_training
        snake_case_ : Dict = use_input_mask
        snake_case_ : int = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : Any = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : Optional[int] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : str = intermediate_multiple_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : Tuple = hidden_dropout
        snake_case_ : Optional[Any] = attention_dropout
        snake_case_ : List[Any] = weight_tying
        snake_case_ : Optional[Any] = max_position_embeddings
        snake_case_ : Dict = type_vocab_size
        snake_case_ : int = type_sequence_label_size
        snake_case_ : str = initializer_range
        snake_case_ : Optional[Any] = num_labels
        snake_case_ : Any = num_choices
        snake_case_ : Optional[int] = scope

    def _A(self :List[Any]) -> Optional[int]:
        '''simple docstring'''
        # prepare_config_and_inputs: random ids + optional mask/labels.
        snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        snake_case_ : Optional[int] = None
        if self.use_input_mask:
            snake_case_ : Any = random_attention_mask([self.batch_size, self.seq_length])
        snake_case_ : Optional[Any] = None
        if self.use_labels:
            snake_case_ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        snake_case_ : List[Any] = self.get_config()
        return config, input_ids, input_mask, token_labels

    def _A(self :List[Any]) -> Any:
        '''simple docstring'''
        # get_config: small GPTNeoXJapaneseConfig built from tester attributes.
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=lowerCAmelCase__,
            initializer_range=self.initializer_range,
        )

    def _A(self :Optional[Any]) -> Any:
        '''simple docstring'''
        # prepare_config_and_inputs_for_decoder: same inputs, decoder flag on.
        snake_case_, snake_case_, snake_case_, snake_case_ : List[Any] = self.prepare_config_and_inputs()
        snake_case_ : int = True
        return config, input_ids, input_mask, token_labels

    def _A(self :Optional[Any], lowerCAmelCase__ :Optional[int], lowerCAmelCase__ :Optional[Any], lowerCAmelCase__ :int) -> Dict:
        '''simple docstring'''
        # create_and_check_model: forward with and without attention mask.
        snake_case_ : Tuple = GPTNeoXJapaneseModel(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        snake_case_ : int = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)
        snake_case_ : List[str] = model(lowerCAmelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _A(self :Union[str, Any], lowerCAmelCase__ :List[str], lowerCAmelCase__ :Tuple, lowerCAmelCase__ :Optional[Any]) -> Optional[int]:
        '''simple docstring'''
        # create_and_check_model_as_decoder.
        snake_case_ : Optional[Any] = True
        snake_case_ : Optional[Any] = GPTNeoXJapaneseModel(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        snake_case_ : str = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _A(self :Union[str, Any], lowerCAmelCase__ :Any, lowerCAmelCase__ :str, lowerCAmelCase__ :Union[str, Any], lowerCAmelCase__ :int) -> Optional[int]:
        '''simple docstring'''
        # create_and_check_for_causal_lm: logits shape check with labels.
        snake_case_ : Any = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        snake_case_ : str = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _A(self :str, lowerCAmelCase__ :str, lowerCAmelCase__ :int, lowerCAmelCase__ :str) -> Tuple:
        '''simple docstring'''
        # create_and_check_decoder_model_past_large_inputs: verify that a
        # cached forward (past_key_values) matches the uncached forward on a
        # random slice of the last hidden states.
        snake_case_ : Optional[Any] = True
        snake_case_ : Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()

        # first forward pass
        snake_case_ : str = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, use_cache=lowerCAmelCase__)
        snake_case_ : List[Any] = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        snake_case_ : Optional[int] = ids_tensor((self.batch_size, 3), config.vocab_size)
        snake_case_ : int = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        snake_case_ : Dict = torch.cat([input_ids, next_tokens], dim=-1)
        snake_case_ : Optional[int] = torch.cat([input_mask, next_mask], dim=-1)

        snake_case_ : str = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, output_hidden_states=lowerCAmelCase__)
        snake_case_ : Union[str, Any] = output_from_no_past["hidden_states"][0]
        snake_case_ : List[str] = model(
            lowerCAmelCase__,
            attention_mask=lowerCAmelCase__,
            past_key_values=lowerCAmelCase__,
            output_hidden_states=lowerCAmelCase__,
        )["hidden_states"][0]

        # select random slice
        snake_case_ : Tuple = ids_tensor((1,), output_from_past.shape[-1]).item()
        snake_case_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case_ : List[str] = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1E-3))

    def _A(self :List[str]) -> Optional[Any]:
        '''simple docstring'''
        # prepare_config_and_inputs_for_common.
        snake_case_ : Tuple = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = config_and_inputs
        snake_case_ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class A_(a_, a_, unittest.TestCase):
    """simple docstring"""
    # Test case wiring the tester into ModelTesterMixin/PipelineTesterMixin.
    # NOTE(review): bases were obfuscated to `a_` (undefined); the referenced
    # `GPTNeoXJapaneseModelTester` below is this file's first class.

    a__ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    a__ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    a__ = (
        {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A(self :Tuple) -> List[Any]:
        '''simple docstring'''
        # setUp.
        snake_case_ : int = GPTNeoXJapaneseModelTester(self)
        snake_case_ : Union[str, Any] = ConfigTester(self, config_class=lowerCAmelCase__, hidden_size=37)

    def _A(self :str) -> Tuple:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _A(self :Dict) -> int:
        '''simple docstring'''
        snake_case_, snake_case_, snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)

    def _A(self :int) -> List[str]:
        '''simple docstring'''
        snake_case_, snake_case_, snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)

    def _A(self :Optional[Any]) -> Optional[Any]:
        '''simple docstring'''
        # decoder path with the input mask removed (default mask).
        snake_case_, snake_case_, snake_case_, snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        snake_case_ : Any = None
        self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)

    def _A(self :Dict) -> List[Any]:
        '''simple docstring'''
        snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)

    def _A(self :List[str]) -> int:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__)

    @slow
    def _A(self :Dict) -> int:
        '''simple docstring'''
        # Integration test: greedy generation from the released 2.7B checkpoint
        # must reproduce the fixed Japanese continuations below.
        snake_case_ : Dict = "abeja/gpt-neox-japanese-2.7b"
        snake_case_ : List[Any] = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        snake_case_ : Optional[Any] = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        snake_case_ : Dict = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
        snake_case_ : str = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase__)
        snake_case_ : Any = []
        for prompt in prompts:
            snake_case_ : str = tokenizer(lowerCAmelCase__, return_tensors="pt").input_ids
            snake_case_ : List[str] = model.generate(lowerCAmelCase__, max_length=50)
            snake_case_ : Tuple = tokenizer.batch_decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
            predicted_outputs += generated_string
        self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
653
'''simple docstring'''
# Test suite for Autoformer (time-series transformer): model tester, common
# tests, and slow integration tests against the tourism-monthly checkpoint.
# NOTE(review): machine-obfuscated dump — classes are `A_`, methods `_A`,
# locals `snake_case_`, parameters `lowerCAmelCase__`/`__magic_name__`.
# Duplicate names collapse distinct methods/parameters (duplicate params are a
# SyntaxError; later `_A` defs shadow earlier ones) while bodies still use the
# original names. Formatting restored and comments added only.
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

# Tolerance for float comparisons in the integration tests.
__lowerCamelCase : Optional[int] = 1E-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class A_:
    """simple docstring"""
    # Model tester: builds a small random Autoformer config and inputs.

    def __init__(
        self :Tuple,
        lowerCAmelCase__ :List[str],
        lowerCAmelCase__ :List[Any]=16,
        lowerCAmelCase__ :Any=13,
        lowerCAmelCase__ :Optional[Any]=7,
        lowerCAmelCase__ :str=14,
        lowerCAmelCase__ :Union[str, Any]=10,
        lowerCAmelCase__ :Tuple=19,
        lowerCAmelCase__ :Optional[Any]=5,
        lowerCAmelCase__ :Dict=4,
        lowerCAmelCase__ :Union[str, Any]=True,
        lowerCAmelCase__ :Any=16,
        lowerCAmelCase__ :str=2,
        lowerCAmelCase__ :List[Any]=4,
        lowerCAmelCase__ :Any=4,
        lowerCAmelCase__ :str="gelu",
        lowerCAmelCase__ :Tuple=0.1,
        lowerCAmelCase__ :Dict=0.1,
        lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5],
        lowerCAmelCase__ :str=25,
        lowerCAmelCase__ :Optional[Any]=5,
    ) -> Dict:
        '''simple docstring'''
        # RHS names below are the pre-obfuscation parameter names.
        snake_case_ : List[str] = d_model
        snake_case_ : Dict = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : Optional[Any] = prediction_length
        snake_case_ : str = context_length
        snake_case_ : Tuple = cardinality
        snake_case_ : List[str] = num_time_features
        snake_case_ : Optional[Any] = lags_sequence
        snake_case_ : Union[str, Any] = embedding_dimension
        snake_case_ : Optional[Any] = is_training
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Any = num_hidden_layers
        snake_case_ : Optional[Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Any = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : List[str] = context_length
        # Decoder sequence length = prediction window + overlapping label part.
        snake_case_ : Any = prediction_length + label_length
        snake_case_ : Union[str, Any] = label_length
        snake_case_ : List[Any] = moving_average
        snake_case_ : str = autocorrelation_factor

    def _A(self :List[Any]) -> Any:
        '''simple docstring'''
        # get_config.
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def _A(self :Union[str, Any], lowerCAmelCase__ :Optional[Any]) -> Dict:
        '''simple docstring'''
        # prepare_autoformer_inputs_dict: random past/future tensors; the past
        # window must also cover the largest lag.
        snake_case_ : Any = config.context_length + max(config.lags_sequence)

        snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1], config.cardinality[0])
        snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length])
        snake_case_ : Dict = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length])

        snake_case_ : int = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def _A(self :Dict) -> Tuple:
        '''simple docstring'''
        # prepare_config_and_inputs.
        snake_case_ : str = self.get_config()
        snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__)
        return config, inputs_dict

    def _A(self :Optional[int]) -> Dict:
        '''simple docstring'''
        # prepare_config_and_inputs_for_common.
        snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def _A(self :Tuple, lowerCAmelCase__ :int, lowerCAmelCase__ :Optional[int]) -> List[str]:
        '''simple docstring'''
        # check_encoder_decoder_model_standalone: save encoder/decoder
        # separately, reload them, and verify their outputs match the full
        # model's intermediate states.
        snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__).to(lowerCAmelCase__).eval()
        snake_case_ : Optional[int] = model(**lowerCAmelCase__)

        snake_case_ : Any = outputs.encoder_last_hidden_state
        snake_case_ : Dict = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[Any] = model.get_encoder()
            encoder.save_pretrained(lowerCAmelCase__)
            snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__).to(lowerCAmelCase__)

        snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__)
        snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        snake_case_ : List[Any] = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )

        snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)

        # Decoder input: seasonal part padded with zeros, trend part padded
        # with the context mean, each concatenated with the time features.
        snake_case_ : Any = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        snake_case_ : List[str] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        snake_case_ : Optional[Any] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        snake_case_ : Any = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : List[Any] = model.get_decoder()
            decoder.save_pretrained(lowerCAmelCase__)
            snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__).to(lowerCAmelCase__)

        snake_case_ : Tuple = decoder(
            trend=lowerCAmelCase__,
            inputs_embeds=lowerCAmelCase__,
            encoder_hidden_states=lowerCAmelCase__,
        )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)


@require_torch
class A_(a_, a_, unittest.TestCase):
    """simple docstring"""
    # Common ModelTesterMixin/PipelineTesterMixin test case for Autoformer.
    # NOTE(review): bases were obfuscated to `a_` (undefined); the referenced
    # `AutoformerModelTester` below is this file's first class.

    a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    a__ = (AutoformerForPrediction,) if is_torch_available() else ()
    a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A(self :Dict) -> int:
        '''simple docstring'''
        # setUp.
        snake_case_ : Tuple = AutoformerModelTester(self)
        snake_case_ : str = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__)

    def _A(self :List[str]) -> Tuple:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _A(self :List[Any]) -> Union[str, Any]:
        '''simple docstring'''
        # test_save_load_strict: round-trip through save_pretrained with no
        # missing keys.
        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(lowerCAmelCase__)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase__)
                snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__, output_loading_info=lowerCAmelCase__)
            self.assertEqual(info["missing_keys"], [])

    def _A(self :Optional[int]) -> Tuple:
        '''simple docstring'''
        snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__)

    @unittest.skip(reason="Model has no tokens embeddings")
    def _A(self :str) -> str:
        '''simple docstring'''
        pass

    def _A(self :Optional[Any]) -> Union[str, Any]:
        '''simple docstring'''
        # main_input_name must match the first forward() argument after self.
        snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__, "forward"))
        # The main input is the name of the argument after `self`
        snake_case_ : Dict = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, lowerCAmelCase__)

    def _A(self :Optional[Any]) -> Optional[int]:
        '''simple docstring'''
        # test_forward_signature: argument names and order are fixed.
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(lowerCAmelCase__)
            snake_case_ : int = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[Any] = [*signature.parameters.keys()]

            snake_case_ : Dict = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(lowerCAmelCase__)], lowerCAmelCase__)

    def _A(self :int) -> Any:
        '''simple docstring'''
        # test_attention_outputs: shapes and counts of encoder/decoder/cross
        # attentions, via config flag and via forward kwarg.
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = True
        snake_case_ : List[str] = getattr(self.model_tester, "seq_length", lowerCAmelCase__)
        snake_case_ : Dict = getattr(self.model_tester, "decoder_seq_length", lowerCAmelCase__)
        snake_case_ : Union[str, Any] = getattr(self.model_tester, "encoder_seq_length", lowerCAmelCase__)
        snake_case_ : Union[str, Any] = getattr(self.model_tester, "d_model", lowerCAmelCase__)
        snake_case_ : Dict = getattr(self.model_tester, "num_attention_heads", lowerCAmelCase__)
        snake_case_ : Optional[int] = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            snake_case_ : Any = True
            snake_case_ : Any = False
            snake_case_ : Dict = True
            snake_case_ : List[str] = model_class(lowerCAmelCase__)
            model.to(lowerCAmelCase__)
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
            snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case_ : Optional[int] = True
            snake_case_ : Any = model_class(lowerCAmelCase__)
            model.to(lowerCAmelCase__)
            model.eval()
            with torch.no_grad():
                snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
            snake_case_ : str = outputs.encoder_attentions
            self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            snake_case_ : Tuple = len(lowerCAmelCase__)

            snake_case_ : List[str] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)

            # decoder attentions
            snake_case_ : Optional[int] = outputs.decoder_attentions
            self.assertIsInstance(lowerCAmelCase__, (list, tuple))
            self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            snake_case_ : List[Any] = outputs.cross_attentions
            self.assertIsInstance(lowerCAmelCase__, (list, tuple))
            self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        snake_case_ : Optional[int] = True
        snake_case_ : List[Any] = True
        snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__)
        model.to(lowerCAmelCase__)
        model.eval()
        with torch.no_grad():
            snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))

        self.assertEqual(out_len + 2, len(lowerCAmelCase__))

        snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def _A(self :Any) -> Optional[Any]:
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()


def __UpperCAmelCase(__magic_name__="train-batch.pt") -> int:
    '''simple docstring'''
    # prepare_batch: download a prepared tourism-monthly batch from the Hub
    # and load it onto the target device.
    snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=__magic_name__, repo_type="dataset")
    snake_case_ : List[str] = torch.load(__magic_name__, map_location=__magic_name__)
    return batch


@require_torch
@slow
class A_(unittest.TestCase):
    """simple docstring"""
    # Slow integration tests against the huggingface/autoformer-tourism-monthly
    # checkpoint: fixed output slices pin down numerical behavior.

    def _A(self :str) -> Any:
        '''simple docstring'''
        # test_inference_no_head: full model forward, check last hidden state.
        snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(lowerCAmelCase__)
        snake_case_ : List[str] = prepare_batch()

        with torch.no_grad():
            snake_case_ : int = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        snake_case_ : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, lowerCAmelCase__)

        snake_case_ : Optional[Any] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]],
            device=lowerCAmelCase__,
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], lowerCAmelCase__, atol=lowerCAmelCase__))

    def _A(self :Any) -> str:
        '''simple docstring'''
        # test_inference_head: encoder-only path of the prediction model.
        snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(lowerCAmelCase__)
        snake_case_ : Optional[Any] = prepare_batch("val-batch.pt")
        with torch.no_grad():
            snake_case_ : Tuple = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, lowerCAmelCase__)

        snake_case_ : Any = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]],
            device=lowerCAmelCase__,
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], lowerCAmelCase__, atol=lowerCAmelCase__))

    def _A(self :List[str]) -> Any:
        '''simple docstring'''
        # test_seq_to_seq_generation: sampling-based forecast; compare the
        # mean prediction for the last three steps of the first series.
        snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(lowerCAmelCase__)
        snake_case_ : str = prepare_batch("val-batch.pt")
        with torch.no_grad():
            snake_case_ : Optional[Any] = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, lowerCAmelCase__)

        snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6], device=lowerCAmelCase__)
        snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], lowerCAmelCase__, rtol=1E-1))
653
1
"""Launcher for the TensorFlow benchmark utility.

Parses ``TensorFlowBenchmarkArguments`` from the command line, translates
legacy ``--no_*`` flags into an actionable error message, and runs the
benchmark.
"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse CLI args into ``TensorFlowBenchmarkArguments`` and run the benchmark.

    Raises:
        ValueError: if deprecated ``--no_<flag>`` style arguments are used; the
            message tells the user to use ``--no-<flag>`` instead and lists any
            arguments that are simply unknown.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() of argparse error text is pre-existing behavior;
        # consider ast.literal_eval if this ever handles untrusted input.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
653
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = RobertaTokenizer a__ = RobertaTokenizerFast a__ = True a__ = {'''cls_token''': '''<s>'''} def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ : int = {"unk_token": "<unk>"} snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> 
Optional[int]: '''simple docstring''' snake_case_ : int = "lower newer" snake_case_ : Tuple = "lower newer" return input_text, output_text def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : Dict = "lower newer" snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokens + [tokenizer.unk_token] snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _A ( self :str ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" ) snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode( "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _A ( self :List[Any] ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = self.get_tokenizer() snake_case_ : Tuple = "Encode this sequence." 
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Testing spaces after special tokens snake_case_ : List[Any] = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : List[str] = "Encode <mask> sequence" snake_case_ : List[Any] = "Encode <mask>sequence" snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : int = encoded.index(lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' pass def _A ( self :int ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Any = "A, <mask> AllenNLP sentence." snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _A ( self :int ) -> Tuple: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): snake_case_ : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , 
trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ ) def _A ( self :List[str] ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ : Any = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , 
add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Tuple = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Any = 
self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
653
1
"""Self-contained Base64 encode/decode helpers using the RFC 4648 alphabet."""

# The 64-character Base64 alphabet; index == 6-bit value.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode *data* to Base64 and return the encoded bytes.

    Raises:
        TypeError: if *data* is not a bytes-like object.
    """
    if not isinstance(data, bytes):
        raise TypeError(f"a bytes-like object is required, not '{data.__class__.__name__}'")
    # Concatenate the 8-bit binary representation of every byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later ('=' per 2 missing bits).
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append arbitrary binary digits (0's) to make the length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode Base64 *encoded_data* (bytes or ASCII str) back to raw bytes.

    Raises:
        TypeError: if *encoded_data* is neither bytes nor str.
        ValueError: if a bytes input contains non-ASCII characters.
        AssertionError: on invalid Base64 characters or incorrect padding
            (kept as assertions for backward compatibility with callers).
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters.
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one; each '=' accounts for 2 filler bits.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2) for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
653
"""Project Euler problem 207: find the smallest partition value for which the
proportion of perfect partitions first drops below a given threshold."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if *positive_integer* is a "perfect" partition value.

    A partition m = k(k-1)/... is perfect when the solved exponent
    log2(sqrt(4m + 1)/2 + 1/2) is an integer.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest partition value m for which the proportion of
    perfect partitions P(m) first falls below *max_proportion*.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
653
1
"""fsspec filesystem registration and small path/rename helpers for the
datasets library."""
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

# True when the optional s3 filesystem dependency is importable.
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from *dataset_path*, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when *fs* is a non-local fsspec filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src, dst):
    """Rename *src* to *dst* on filesystem *fs*.

    Uses ``shutil.move`` for the local filesystem and ``fs.mv`` otherwise.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's asyncio lock/loop/iothread, e.g. after a fork, to avoid
    deadlocks in child processes."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # NOTE(review): assignment targets restored from upstream
        # datasets.filesystems — the garbled source only shows the three
        # right-hand sides (None, None, threading.Lock()); verify.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
653
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger() @dataclass class A_ : """simple docstring""" a__ = 42 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int: '''simple docstring''' snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _A ( self :int ) -> List[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class A_ : """simple docstring""" a__ = 42 a__ = 42 a__ = 0 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : 
type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]: """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval() snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval() snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ ) snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) ) module_transfer(__magic_name__ ) assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one." 
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}''' print(__magic_name__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,) # we can use the convnext one snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,) print(F'''Pushed {checkpoint_name}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple: """simple docstring""" snake_case_ : List[str] = "imagenet-1k-id2label.json" snake_case_ : Optional[Any] = 1000 snake_case_ : List[Any] = (1, num_labels) snake_case_ : Optional[Any] = "huggingface/label-files" snake_case_ : Dict = num_labels snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) ) snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()} snake_case_ : Any = idalabel snake_case_ : List[Any] = {v: k for k, v in idalabel.items()} snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ ) snake_case_ : Optional[int] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( 
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), } if model_name: convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __lowerCamelCase : Tuple = parser.parse_args() __lowerCamelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
653
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __lowerCamelCase : Optional[int] = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } __lowerCamelCase : Optional[int] = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off __lowerCamelCase : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class A_ (a_ ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ['''input_ids''', '''attention_mask'''] a__ = MBartTokenizer a__ = [] a__ = [] def __init__( self :int , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Union[str, 
Any]=None , lowerCAmelCase__ :List[Any]="<s>" , lowerCAmelCase__ :int="</s>" , lowerCAmelCase__ :Union[str, Any]="</s>" , lowerCAmelCase__ :List[Any]="<s>" , lowerCAmelCase__ :str="<unk>" , lowerCAmelCase__ :Union[str, Any]="<pad>" , lowerCAmelCase__ :Any="<mask>" , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :Optional[int] , ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , ) snake_case_ : Optional[Any] = vocab_file snake_case_ : Optional[Any] = False if not self.vocab_file else True snake_case_ : Any = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) snake_case_ : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Dict = src_lang if src_lang is not None else "en_XX" snake_case_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _A ( self :Tuple ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def _A ( self :Tuple , lowerCAmelCase__ :str ) -> None: '''simple docstring''' snake_case_ : Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _A ( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _A ( self :List[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' snake_case_ : Union[str, Any] = [self.sep_token_id] snake_case_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _A ( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] , lowerCAmelCase__ :Optional[str] , **lowerCAmelCase__ :Any ) -> str: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) snake_case_ : List[str] = src_lang snake_case_ : Optional[Any] = 
self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : int = tgt_lang_id return inputs def _A ( self :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str = "en_XX" , lowerCAmelCase__ :Optional[List[str]] = None , lowerCAmelCase__ :str = "ro_RO" , **lowerCAmelCase__ :Dict , ) -> BatchEncoding: '''simple docstring''' snake_case_ : Any = src_lang snake_case_ : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Optional[int]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :str ) -> None: '''simple docstring''' snake_case_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Tuple = [] snake_case_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Any = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> None: '''simple docstring''' snake_case_ : Any = self.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : List[Any] = [] snake_case_ : List[str] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : int = 
processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return snake_case_ : Union[str, Any] = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
653
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> config-file URL map for RoCBert.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class A_(PretrainedConfig):
    """Configuration class for RoCBert.

    Stores the hyper-parameters of a RoCBert model — a BERT variant that adds
    pronunciation and glyph/shape embeddings. Defaults mirror the
    ``weiweishi/roc-bert-base-zh`` checkpoint. All arguments keep the original
    positional order, so positional callers are unaffected.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # Standard transformer backbone hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation and glyph/shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
653
1
"""Character-level SentencePiece tokenizer for SpeechT5."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class A_(PreTrainedTokenizer):
    """SpeechT5 tokenizer.

    Wraps a character-level SentencePiece model and appends ``</s>`` (eos) to
    every encoded sequence.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at *vocab_file* and register special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The C++ SentencePiece processor is not picklable: drop it here and
        # reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles made before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into SentencePiece pieces (characters for this model)."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a string, decoding around special tokens."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append eos; pairs are concatenated with a single trailing eos."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens (only the trailing eos here)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab file was loaded from memory/hub: write the serialized proto instead
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
653
"""Largest square of 1s in a binary matrix, solved four ways.

All four functions return the SIDE LENGTH of the largest all-ones square
(kept as-is from the original behavior, despite the "area" naming):
naive recursion, memoised recursion, bottom-up 2-D DP, and a row-rolling
O(cols)-space variant.
"""


def largest_square_area_in_matrix_top_down_approch(rows, cols, mat):
    """Plain recursion over every cell (exponential; reference implementation)."""

    def update_area_of_max_square(row, col):
        # BASE CASE: fell off the matrix
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            # single-element list so the nested function can mutate the best-so-far
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows, cols, mat):
    """Top-down recursion with memoisation in a (rows x cols) dp table."""

    def update_area_of_max_square_using_dp_array(row, col, dp_array):
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:  # already solved this sub-problem
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows, cols, mat):
    """Bottom-up 2-D DP: dp[r][c] = side of the largest square anchored at (r, c)."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_with_a_variable(rows, cols, mat):
    """Same DP as bottom-up, but keeps only two rows -> O(cols) extra space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
        current_row = [0] * (cols + 1)
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
653
1
"""Report the per-job wall-clock duration of a GitHub Actions workflow run."""
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Return {"started_at", "completed_at", "duration"} (minutes) for one job payload."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Map job name -> timing info for every job of *workflow_run_id*.

    Pages through the GitHub API 100 jobs at a time; returns {} on any error
    (best-effort reporting script, so failures are logged, not raised).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # first page already fetched 100 jobs; fetch the remaining pages
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
653
"""Fetch artifacts/reports from the latest completed scheduled daily CI run."""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the last *num_runs* scheduled runs of the daily CI workflow on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the run id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the latest completed daily CI run into *output_dir*."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) is the actual keyword of the imported helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download then unzip the named artifacts; return {artifact: {member: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
653
1
"""Tests for the Kandinsky 2.1 inpainting pipeline (fast dummy tests + slow GPU integration)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import (
    DDIMScheduler,
    KandinskyInpaintPipeline,
    KandinskyPriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """CPU-sized dummy-model tests exercised through PipelineTesterMixin."""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1_005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full pipeline component dict from the dummy sub-models."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy call kwargs (embeds, init image, mask, generator)."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): subscript reconstructed from the upstream diffusers test;
        # the mangled source had lost the slice on this assignment — confirm.
        mask[:16, :16] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        """End-to-end dummy run: checks output shape and a pinned pixel slice."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Full-checkpoint GPU test against a reference image."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): subscript reconstructed from the upstream diffusers test;
        # the mangled source had lost the slice on this assignment — confirm.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
653
'''simple docstring''' from string import ascii_uppercase __lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)} __lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase)) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Tuple = len(__magic_name__ ) snake_case_ : str = 0 while True: if x == i: snake_case_ : List[str] = 0 if len(__magic_name__ ) == len(__magic_name__ ): break key += key[i] i += 1 return key def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : str = "" snake_case_ : List[Any] = 0 for letter in message: if letter == " ": cipher_text += " " else: snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Dict = "" snake_case_ : Dict = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def __UpperCAmelCase ( )-> None: """simple docstring""" snake_case_ : List[str] = "THE GERMAN ATTACK" snake_case_ : List[str] = "SECRET" snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ ) snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ ) print(F'''Encrypted Text = {s}''' ) print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
653
1
'''Tests for CLIPProcessor: save/load round-trips and delegation to the
tokenizer / image processor components.

NOTE(review): identifiers in this file look machine-mangled — every local is
``snake_case_``, every argument ``lowerCAmelCase__``, and every method is
named ``_A`` (so later defs shadow earlier ones on the class). Later lines
reference the names the locals *should* have had (``self.tmpdirname``,
``processor_slow``, ...), so the module cannot run as-is; the original
identifiers need restoring. Comments below describe the evident intent.
'''
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class A_(unittest.TestCase):
    """Round-trip and delegation tests for CLIPProcessor (tokenizer + image processor)."""

    def _A(self: List[Any]) -> int:
        '''setUp: write a tiny BPE vocab/merges pair and an image-processor
        config into a temp dir so ``*_from_pretrained`` can load them.'''
        snake_case_: Any = tempfile.mkdtemp()
        # fmt: off
        snake_case_: Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        snake_case_: List[Any] = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
        snake_case_: str = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        snake_case_: Optional[Any] = {"unk_token": "<unk>"}
        snake_case_: str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        snake_case_: List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(lowerCAmelCase__) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(lowerCAmelCase__))
        # Minimal CLIPImageProcessor config (resize→crop→normalize pipeline).
        snake_case_: Optional[int] = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        snake_case_: Optional[Any] = os.path.join(self.tmpdirname, lowerCAmelCase__)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(lowerCAmelCase__, lowerCAmelCase__)

    def _A(self: Union[str, Any], **lowerCAmelCase__: int) -> Dict:
        '''Load the slow CLIPTokenizer written by setUp.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _A(self: Any, **lowerCAmelCase__: str) -> Union[str, Any]:
        '''Load the fast (Rust) tokenizer from the same files.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _A(self: List[str], **lowerCAmelCase__: List[Any]) -> int:
        '''Load the CLIPImageProcessor config written by setUp.'''
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _A(self: Optional[Any]) -> Optional[Any]:
        '''tearDown: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname)

    def _A(self: List[Any]) -> List[Any]:
        '''Create one random RGB PIL image as processor input.
        NOTE(review): ``np.uinta`` is a mangled ``np.uint8``.'''
        snake_case_: Any = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        snake_case_: Dict = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs]
        return image_inputs

    def _A(self: List[str]) -> Any:
        '''Saving a processor (slow or fast tokenizer) and reloading it must
        preserve the tokenizer vocab and the image-processor JSON config.'''
        snake_case_: Any = self.get_tokenizer()
        snake_case_: str = self.get_rust_tokenizer()
        snake_case_: Tuple = self.get_image_processor()
        snake_case_: Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        processor_slow.save_pretrained(self.tmpdirname)
        snake_case_: Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
        snake_case_: List[str] = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        processor_fast.save_pretrained(self.tmpdirname)
        snake_case_: Any = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
        self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
        self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)

    def _A(self: Tuple) -> Optional[int]:
        '''``from_pretrained`` kwargs (new special tokens / image kwargs) must
        override the component configs saved on disk.'''
        snake_case_: Optional[int] = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        snake_case_: int = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        snake_case_: Any = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
        snake_case_: int = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=lowerCAmelCase__, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, lowerCAmelCase__)

    def _A(self: Union[str, Any]) -> List[str]:
        '''processor(images=...) must match calling the image processor directly
        (sums compared with a small tolerance).'''
        snake_case_: Optional[int] = self.get_image_processor()
        snake_case_: List[str] = self.get_tokenizer()
        snake_case_: int = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        snake_case_: int = self.prepare_image_inputs()
        snake_case_: List[str] = image_processor(lowerCAmelCase__, return_tensors="np")
        snake_case_: Dict = processor(images=lowerCAmelCase__, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def _A(self: str) -> Dict:
        '''processor(text=...) must match calling the tokenizer directly.'''
        snake_case_: int = self.get_image_processor()
        snake_case_: str = self.get_tokenizer()
        snake_case_: List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        snake_case_: int = "lower newer"
        snake_case_: Union[str, Any] = processor(text=lowerCAmelCase__)
        snake_case_: Union[str, Any] = tokenizer(lowerCAmelCase__)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _A(self: List[Any]) -> Dict:
        '''A combined text+image call yields exactly input_ids/attention_mask/
        pixel_values; calling with no inputs must raise.'''
        snake_case_: Tuple = self.get_image_processor()
        snake_case_: int = self.get_tokenizer()
        snake_case_: Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        snake_case_: List[str] = "lower newer"
        snake_case_: Dict = self.prepare_image_inputs()
        snake_case_: Optional[Any] = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase__):
            processor()

    def _A(self: int) -> List[str]:
        '''processor.batch_decode must delegate to tokenizer.batch_decode.'''
        snake_case_: str = self.get_image_processor()
        snake_case_: List[Any] = self.get_tokenizer()
        snake_case_: Tuple = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        snake_case_: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        snake_case_: Dict = processor.batch_decode(lowerCAmelCase__)
        snake_case_: Tuple = tokenizer.batch_decode(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)

    def _A(self: Tuple) -> Optional[Any]:
        '''Keys of a text+image call must equal processor.model_input_names.'''
        snake_case_: List[Any] = self.get_image_processor()
        snake_case_: int = self.get_tokenizer()
        snake_case_: Any = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        snake_case_: int = "lower newer"
        snake_case_: str = self.prepare_image_inputs()
        snake_case_: Any = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
653
'''Convert a PyTorch BERT checkpoint into a TensorFlow 1.x checkpoint by
copying every tensor of ``model.state_dict()`` into a TF graph variable.

NOTE(review): locals and arguments here are machine-mangled (``snake_case_``,
``__magic_name__``); later lines use the intended names (``model``,
``state_dict``, ``saver`` ...), so the script cannot run as-is without the
original identifiers. The ``main()`` called under ``__main__`` is also the
(renamed) second function below.
'''
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def __UpperCAmelCase(__magic_name__, __magic_name__, __magic_name__) -> Dict:
    """Write each PyTorch parameter into a TF variable and save a .ckpt.

    Presumably the three arguments are (model, ckpt_dir, model_name) — the
    call site below passes exactly those keywords; confirm once names are
    restored.
    """
    # Parameter names whose tensors must be transposed (PyTorch Linear stores
    # (out, in); TF kernels are (in, out)).
    snake_case_: Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered (pattern, replacement) pairs mapping PyTorch names to TF names.
    snake_case_: Union[str, Any] = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(__magic_name__):
        os.makedirs(__magic_name__)
    snake_case_: str = model.state_dict()

    def to_tf_var_name(__magic_name__):
        # Apply every rename pattern in order, then prefix with "bert/".
        for patt, repl in iter(__magic_name__):
            snake_case_: List[str] = name.replace(__magic_name__, __magic_name__)
        return F'''bert/{name}'''

    def create_tf_var(__magic_name__, __magic_name__, __magic_name__):
        # Allocate a zero-initialized TF variable matching the torch tensor.
        snake_case_: List[Any] = tf.dtypes.as_dtype(tensor.dtype)
        snake_case_: Union[str, Any] = tf.get_variable(
            dtype=__magic_name__, shape=tensor.shape, name=__magic_name__, initializer=tf.zeros_initializer()
        )
        session.run(tf.variables_initializer([tf_var]))
        session.run(__magic_name__)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            snake_case_: Optional[int] = to_tf_var_name(__magic_name__)
            snake_case_: Dict = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                snake_case_: List[Any] = torch_tensor.T
            snake_case_: Union[str, Any] = create_tf_var(tensor=__magic_name__, name=__magic_name__, session=__magic_name__)
            tf.keras.backend.set_value(__magic_name__, __magic_name__)
            # Read back and compare against the source tensor for a sanity print.
            snake_case_: List[str] = session.run(__magic_name__)
            print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__, __magic_name__)}''')
        snake_case_: Any = tf.train.Saver(tf.trainable_variables())
        saver.save(__magic_name__, os.path.join(__magic_name__, model_name.replace("-", "_") + ".ckpt"))


def __UpperCAmelCase(__magic_name__=None) -> Optional[Any]:
    """CLI entry point: parse args, load the BERT model, run the conversion."""
    snake_case_: Any = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=__magic_name__, required=__magic_name__, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=__magic_name__, default=__magic_name__, required=__magic_name__, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=__magic_name__, required=__magic_name__, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=__magic_name__, required=__magic_name__, help="Directory in which to save tensorflow model")
    snake_case_: Optional[int] = parser.parse_args(__magic_name__)
    snake_case_: Optional[int] = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=__magic_name__, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
653
1
'''ONNX export subpackage init: lazily exposes config/convert/features/utils.

Fixes relative to the previous revision:
- the import-structure dict was bound to a throwaway name while
  ``_LazyModule`` was called with the undefined ``_import_structure``;
- the eager TYPE_CHECKING import referenced ``OnnxSeqaSeqConfigWithPast``,
  which does not match the ``OnnxSeq2SeqConfigWithPast`` symbol declared in
  the structure;
- the lazy module object was assigned to a throwaway name instead of being
  installed into ``sys.modules``, so laziness never took effect.
'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Public surface of the subpackage, grouped by submodule; consumed both by
# the lazy loader below and (via TYPE_CHECKING) by static analyzers.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
'''Hash table variant whose slots hold deques of values (separate chaining).

Fixes relative to the previous revision: the base class was the undefined
name ``a_`` (should be the imported ``HashTable``), all three methods were
named ``_A`` (each shadowing the previous, and none overriding the base-class
hooks), and the slot/key assignments had been reduced to dead locals.
'''
from collections import deque

from .hash_table import HashTable


class A_(HashTable):
    """HashTable whose values are deques; a slot absorbs collisions until its
    deque holds ``charge_factor`` entries."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Push ``data`` onto the slot's deque, creating the deque lazily."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining capacity per slot, scaled by the charge factor.

        NOTE(review): raises TypeError on empty (None) slots, as in the
        upstream implementation — confirm against the base class's usage.
        """
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Reuse ``key``'s slot while its deque has room (or empty slots
        remain); otherwise defer to the base class's probing."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
653
1
'''Scrape worldometers.info for current worldwide COVID-19 statistics.

Fixes relative to the previous revision: the import was ``from bsa import
BeautifulSoup`` (no such package — the Beautiful Soup 4 distribution imports
as ``bs4``), the scraped ``keys``/``values`` lists were never bound (the
follow-up ``keys += ...`` lines referenced undefined names), and the
``__main__`` block called an undefined ``world_covidaa_stats``.
'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a mapping {statistic title: value string} scraped from *url*.

    Titles come from <h1>/"panel-title" spans; values from the matching
    counter <div>s. Network access is required; raises ``requests``
    exceptions on connection failure.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    # zip() pairs each title with its counter in document order.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
653
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar('''KEY''') __lowerCamelCase : int = TypeVar('''VAL''') @dataclass(frozen=a_ , slots=a_ ) class A_ (Generic[KEY, VAL] ): """simple docstring""" a__ = 42 a__ = 42 class A_ (_Item ): """simple docstring""" def __init__( self :List[Any] ) -> None: '''simple docstring''' super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __bool__( self :Optional[int] ) -> bool: '''simple docstring''' return False __lowerCamelCase : Dict = _DeletedItem() class A_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None: '''simple docstring''' snake_case_ : Any = initial_block_size snake_case_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case_ : Tuple = capacity_factor snake_case_ : List[Any] = 0 def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int: '''simple docstring''' return hash(lowerCAmelCase__ ) % len(self._buckets ) def _A ( self :Any , lowerCAmelCase__ :int ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool: '''simple docstring''' snake_case_ : Optional[int] = self._buckets[ind] if not stored: snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) self._len += 1 return True elif stored.key == key: snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) return True else: return False def _A ( self :int ) -> bool: '''simple docstring''' snake_case_ : Any = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCAmelCase__ ) def _A ( self :Any ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False snake_case_ : Optional[int] = len(self._buckets ) * 
self._capacity_factor / 2 return len(self ) < limit def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None: '''simple docstring''' snake_case_ : Tuple = self._buckets snake_case_ : int = [None] * new_size snake_case_ : Any = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _A ( self :Optional[int] ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def _A ( self :str ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]: '''simple docstring''' snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ ) for _ in range(len(self._buckets ) ): yield ind snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): break def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCAmelCase__ , lowerCAmelCase__ ) def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : int = self._buckets[ind] if item is None: raise KeyError(lowerCAmelCase__ ) if item is _deleted: continue if item.key == key: snake_case_ : List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : Optional[Any] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCAmelCase__ ) def __len__( self :Optional[Any] ) -> int: '''simple docstring''' return self._len def 
__iter__( self :List[Any] ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self :Any ) -> str: '''simple docstring''' snake_case_ : Dict = " ,".join( F'''{item.key}: {item.val}''' for item in self._buckets if item ) return F'''HashMap({val_string})'''
653
1
'''Bifid cipher over the classic 5x5 Polybius square (i/j merged).

Fixes relative to the previous revision: ``__init__`` bound the square to a
dead local so ``self.SQUARE`` never existed; every intermediate in
encode/decode was an unbound mangled local; and ``decode`` discarded the
result of ``message.replace(" ", "")`` (a no-op), so ciphertext containing
spaces failed to decode.
'''
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class A_:
    """Bifid cipher: letters -> (row, col) pairs -> transposed digit stream
    -> letters (and the exact inverse for decoding)."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based [row, col] of ``letter`` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        return np.concatenate([index1 + 1, index2 + 1])

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the square letter at 1-based (index1, index2)."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        """Encode ``message`` (case-insensitive; spaces dropped; j -> i)."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        # Row coordinates go in row 0, column coordinates in row 1.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        # Row-major flatten: all rows first, then all columns.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            encoded_message += self.numbers_to_letter(index1, index2)
        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a Bifid ciphertext back to lowercase plaintext."""
        message = message.lower()
        message = message.replace(" ", "")  # was a discarded no-op before
        # Interleave each ciphertext letter's (row, col) to rebuild the
        # flattened stream produced by encode().
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        # Un-flatten: row 0 holds the original row coords, row 1 the columns.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            decoded_message += self.numbers_to_letter(index1, index2)
        return decoded_message
653
'''GPTBigCode (``gpt_bigcode``) model configuration.

Fixes relative to the previous revision: every ``__init__`` parameter was
named ``lowerCAmelCase__`` (a duplicate-argument SyntaxError), none of the
values were ever assigned to ``self``, the base class was the undefined name
``a_`` (should be the imported ``PretrainedConfig``), the class attributes
were three clashing ``a__`` bindings, and the logger/archive-map module
globals were both bound to ``__lowerCamelCase``. Parameter names are restored
from the attribute_map / assignment targets visible in the original body.
'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class A_(PretrainedConfig):
    """Configuration for GPTBigCode models (multi-query attention GPT-2 variant).

    Defaults reproduce the ``bigcode/gpt_bigcode-santacoder`` architecture.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic HF attribute names onto the GPT-2-style short names.
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
653
1
'''CLIP model subpackage init with optional-dependency-aware lazy loading.

Fixes relative to the previous revision: each optional group's export list
was bound to a throwaway ``__lowerCamelCase`` name instead of being inserted
into the import structure, ``_LazyModule`` was called with the undefined
``_import_structure``, and the resulting lazy module was never installed into
``sys.modules`` — so lazy loading could not work at all.
'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Always-available exports; optional groups are appended below only when
# their backing dependency is installed.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so each submodule imports on first attribute use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
'''Utilities for the distillation training scripts: record the git state,
initialize (multi-)GPU distributed parameters, and seed RNGs.

NOTE(review): locals/arguments here are machine-mangled (``snake_case_``,
``__magic_name__``) while later lines use the intended names (``repo``,
``params``, ``args``...), and the many ``params.<attr> = ...`` assignment
targets were lost — the original identifiers must be restored before this
module can run.
'''
import json
import logging
import os
import socket

import git
import numpy as np
import torch

logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)


def __UpperCAmelCase(__magic_name__) -> str:
    """Dump the current git repo id/sha/branch to ``<arg>/git_log.json``.

    Presumably the argument is the output folder path — TODO confirm once
    names are restored.
    """
    snake_case_: Dict = git.Repo(search_parent_directories=__magic_name__)
    snake_case_: Optional[int] = {
        "repo_id": str(__magic_name__),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(__magic_name__, "git_log.json"), "w") as f:
        json.dump(__magic_name__, __magic_name__, indent=4)


def __UpperCAmelCase(__magic_name__) -> Tuple:
    """Fill in distributed-training fields on ``params`` from the environment
    (WORLD_SIZE/RANK/...), validate them, pin the CUDA device, and initialize
    the NCCL process group for multi-GPU runs.
    """
    if params.n_gpu <= 0:
        # CPU-only run: neutral single-process values.
        snake_case_: Any = 0
        snake_case_: Any = -1
        snake_case_: Tuple = True
        snake_case_: List[str] = False
        return
    assert torch.cuda.is_available()
    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        # Multi-GPU: topology comes from the launcher's environment variables.
        assert params.local_rank != -1
        snake_case_: Optional[int] = int(os.environ["WORLD_SIZE"])
        snake_case_: int = int(os.environ["N_GPU_NODE"])
        snake_case_: Any = int(os.environ["RANK"])
        # number of nodes / node ID
        snake_case_: Dict = params.world_size // params.n_gpu_per_node
        snake_case_: Optional[int] = params.global_rank // params.n_gpu_per_node
        snake_case_: Tuple = True
        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        snake_case_: Optional[int] = 1
        snake_case_: str = 0
        snake_case_: List[Any] = 0
        snake_case_: int = 0
        snake_case_: Dict = 1
        snake_case_: Optional[Any] = 1
        snake_case_: str = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    snake_case_: str = params.node_id == 0 and params.local_rank == 0
    snake_case_: str = params.n_nodes > 1
    # summary
    snake_case_: str = F'''--- Global rank: {params.global_rank} - '''
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def __UpperCAmelCase(__magic_name__) -> Dict:
    """Seed numpy and torch (all CUDA devices) from ``args.seed``."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
653
1
'''NEZHA model configuration.

Fixes relative to the previous revision: the archive-map constant was bound
to ``__lowerCamelCase`` while the class attribute referenced the undefined
``NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP``; every ``__init__`` parameter was
named ``lowerCAmelCase__`` (a duplicate-argument SyntaxError) and no value
was ever assigned to ``self``; the base class was the undefined ``a_``; and
the two class attributes were clashing ``a__`` bindings. Parameter names are
restored from the assignment targets visible in the original body.
'''
from ... import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class A_(PretrainedConfig):
    """Configuration for NEZHA models (BERT-like, with relative positions).

    Defaults reproduce the ``sijunhe/nezha-cn-base`` architecture.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        # NEZHA uses functional relative positions bounded by this distance.
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
653
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class A_ (unittest.TestCase ): """simple docstring""" def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str: '''simple docstring''' snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} snake_case_ : Dict = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Optional[Any] = num_channels snake_case_ : str = min_resolution snake_case_ : Dict = max_resolution snake_case_ : Optional[Any] = do_resize snake_case_ : str = size snake_case_ : Optional[int] = do_normalize snake_case_ : Dict = image_mean snake_case_ : Optional[int] = image_std snake_case_ : List[str] = do_rescale snake_case_ : Dict = rescale_factor snake_case_ : str = do_pad def _A ( self :List[Any] ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str: '''simple 
docstring''' if not batched: snake_case_ : List[str] = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): snake_case_, snake_case_ : int = image.size else: snake_case_, snake_case_ : Any = image.shape[1], image.shape[2] if w < h: snake_case_ : int = int(self.size["shortest_edge"] * h / w ) snake_case_ : List[Any] = self.size["shortest_edge"] elif w > h: snake_case_ : Optional[int] = self.size["shortest_edge"] snake_case_ : str = int(self.size["shortest_edge"] * w / h ) else: snake_case_ : Tuple = self.size["shortest_edge"] snake_case_ : Dict = self.size["shortest_edge"] else: snake_case_ : List[str] = [] for image in image_inputs: snake_case_, snake_case_ : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = YolosImageProcessor if is_vision_available() else None def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : int = YolosImageProcessingTester(self ) @property def _A ( self :List[str] ) -> Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) ) def _A ( self :List[Any] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) snake_case_ : Optional[int] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def _A ( self :List[str] ) -> int: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self :Dict ) -> Dict: '''simple docstring''' snake_case_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: 
self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self :Tuple ) -> Dict: '''simple 
docstring''' snake_case_ : str = self.image_processing_class(**self.image_processor_dict ) snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ ) # create random PyTorch tensors snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" ) self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) ) @slow def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case_ : int = json.loads(f.read() ) snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target} # encode them snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" ) snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" ) # verify pixel values snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) # verify area snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] 
, lowerCAmelCase__ ) ) # verify boxes snake_case_ : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) ) # verify image_id snake_case_ : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) ) # verify is_crowd snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) ) # verify class_labels snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) ) # verify orig_size snake_case_ : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) ) # verify size snake_case_ : List[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) ) @slow def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case_ : Optional[int] = json.loads(f.read() ) snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case_ : int = YolosImageProcessor(format="coco_panoptic" ) snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" ) # verify pixel values snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , 
lowerCAmelCase__ ) snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) # verify area snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) ) # verify boxes snake_case_ : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ ) snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) ) # verify image_id snake_case_ : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) ) # verify is_crowd snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) ) # verify class_labels snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) ) # verify masks snake_case_ : Any = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ ) # verify orig_size snake_case_ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) ) # verify size snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
653
1
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCamelCase : Optional[int] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict: '''simple docstring''' snake_case_ : List[str] = d_model snake_case_ : Dict = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[Any] = prediction_length snake_case_ : str = context_length snake_case_ : Tuple = cardinality snake_case_ : List[str] = num_time_features snake_case_ : Optional[Any] = lags_sequence snake_case_ : Union[str, Any] = embedding_dimension snake_case_ : Optional[Any] = is_training snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : int = 
intermediate_size snake_case_ : Any = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = context_length snake_case_ : Any = prediction_length + label_length snake_case_ : Union[str, Any] = label_length snake_case_ : List[Any] = moving_average snake_case_ : str = autocorrelation_factor def _A ( self :List[Any] ) -> Any: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = config.context_length + max(config.lags_sequence ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] ) snake_case_ : int = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _A ( self :Dict ) -> Tuple: '''simple docstring''' snake_case_ : str = self.get_config() snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ ) return config, inputs_dict def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() snake_case_ : Optional[int] = model(**lowerCAmelCase__ ) snake_case_ : Any = outputs.encoder_last_hidden_state snake_case_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ ) snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) snake_case_ : List[Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) snake_case_ : Any = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) snake_case_ : List[str] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) snake_case_ : Optional[Any] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) snake_case_ : Any = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = model.get_decoder() decoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Tuple = decoder( trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a__ = (AutoformerForPrediction,) if is_torch_available() else () a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = AutoformerModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _A ( self :List[str] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ : List[Any] = 
model_class(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _A ( self :str ) -> str: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) ) # The main input is the name of the argument after `self` snake_case_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[Any] = [*signature.parameters.keys()] snake_case_ : Dict = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCAmelCase__ 
)] , lowerCAmelCase__ ) def _A ( self :int ) -> Any: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = True snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ ) snake_case_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: snake_case_ : Any = True snake_case_ : Any = False snake_case_ : Dict = True snake_case_ : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.encoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) snake_case_ : Tuple = len(lowerCAmelCase__ ) snake_case_ : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: 
correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # decoder attentions snake_case_ : Optional[int] = outputs.decoder_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions snake_case_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine snake_case_ : Optional[int] = True snake_case_ : List[Any] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) ) snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _A ( self :Any ) -> Optional[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int: """simple docstring""" snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" 
) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ ) return batch @require_torch @slow class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : List[str] = prepare_batch() with torch.no_grad(): snake_case_ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] snake_case_ : Optional[int] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Tuple = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 
1.2_9_7_0]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : str = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Optional[Any] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ ) snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
653
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" if not isinstance(__magic_name__ ,__magic_name__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) snake_case_ : Dict = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__magic_name__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
653
1
'''simple docstring''' import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") snake_case_ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__magic_name__ ): os.makedirs(__magic_name__ ) snake_case_ : str = model.state_dict() def to_tf_var_name(__magic_name__ ): for patt, repl in iter(__magic_name__ ): snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ ) return F'''bert/{name}''' def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ): snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype ) snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__magic_name__ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ ) snake_case_ : Dict = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): snake_case_ : List[Any] = torch_tensor.T snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ ) tf.keras.backend.set_value(__magic_name__ ,__magic_name__ ) snake_case_ : List[str] = session.run(__magic_name__ ) print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' ) snake_case_ : Any = 
tf.train.Saver(tf.trainable_variables() ) saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) ) def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]: """simple docstring""" snake_case_ : Any = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" ) snake_case_ : Optional[int] = parser.parse_args(__magic_name__ ) snake_case_ : Optional[int] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,) convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name ) if __name__ == "__main__": main()
653
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase : Tuple = 16 __lowerCamelCase : Optional[int] = 32 def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int: """simple docstring""" snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" ) snake_case_ : str = load_dataset("glue" ,"mrpc" ) def tokenize_function(__magic_name__ ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with 
accelerator.main_process_first(): snake_case_ : Any = datasets.map( __magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__magic_name__ ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ : Tuple = 16 elif accelerator.mixed_precision != "no": snake_case_ : str = 8 else: snake_case_ : Optional[Any] = None return tokenizer.pad( __magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,) # Instantiate dataloaders. snake_case_ : str = DataLoader( tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ ) snake_case_ : Optional[Any] = DataLoader( tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1": snake_case_ : List[str] = 2 # Initialize accelerator snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : List[str] = config["lr"] snake_case_ : Dict = 
int(config["num_epochs"] ) snake_case_ : Dict = int(config["seed"] ) snake_case_ : Optional[int] = int(config["batch_size"] ) snake_case_ : Dict = evaluate.load("glue" ,"mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__magic_name__ ) def inner_training_loop(__magic_name__ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__magic_name__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ ) snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ ) # Instantiate scheduler snake_case_ : Tuple = get_linear_schedule_with_warmup( optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) # Now we train the model for epoch in range(__magic_name__ ): model.train() for step, batch in enumerate(__magic_name__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) snake_case_ : int = model(**__magic_name__ ) snake_case_ : Any = outputs.loss accelerator.backward(__magic_name__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__magic_name__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : Union[str, Any] = model(**__magic_name__ ) snake_case_ : List[str] = outputs.logits.argmax(dim=-1 ) snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=__magic_name__ ,references=__magic_name__ ,) snake_case_ : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ,) parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." 
) snake_case_ : str = parser.parse_args() snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(__magic_name__ ,__magic_name__ ) if __name__ == "__main__": main()
653
1
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): SCREAMING_SNAKE_CASE__ : Dict = """pt""" elif is_tf_available(): SCREAMING_SNAKE_CASE__ : List[str] = """tf""" else: SCREAMING_SNAKE_CASE__ : List[Any] = """jax""" class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ): a__ = PerceiverTokenizer a__ = False def A ( self ): """simple docstring""" super().setUp() __magic_name__ :str = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self ): """simple docstring""" return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self , **__lowerCAmelCase ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2_0 , __lowerCAmelCase=5 ): """simple docstring""" # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. 
__magic_name__ :List[str] = [] for i in range(len(__lowerCAmelCase ) ): try: __magic_name__ :Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __magic_name__ :Dict = list(filter(lambda __lowerCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowerCAmelCase ) ) __magic_name__ :Union[str, Any] = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) ) if max_length is not None and len(__lowerCAmelCase ) > max_length: __magic_name__ :int = toks[:max_length] if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0: while len(__lowerCAmelCase ) < min_length: __magic_name__ :str = toks + toks # toks_str = [t[1] for t in toks] __magic_name__ :Dict = [t[0] for t in toks] # Ensure consistency __magic_name__ :List[str] = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) if " " not in output_txt and len(__lowerCAmelCase ) > 1: __magic_name__ :Tuple = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase ) ) if with_prefix_space: __magic_name__ :Union[str, Any] = ''' ''' + output_txt __magic_name__ :str = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) return output_txt, output_ids def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = self.perceiver_tokenizer __magic_name__ :List[Any] = '''Unicode €.''' __magic_name__ :Tuple = tokenizer(__lowerCAmelCase ) __magic_name__ :Optional[int] = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase ) # decoding __magic_name__ :List[str] = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , '''[CLS]Unicode €.[SEP]''' ) 
__magic_name__ :int = tokenizer('''e è é ê ë''' ) __magic_name__ :List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCAmelCase ) # decoding __magic_name__ :Tuple = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = self.perceiver_tokenizer __magic_name__ :Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off __magic_name__ :Union[str, Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on __magic_name__ :List[str] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) if FRAMEWORK != "jax": __magic_name__ :List[Any] = list(batch.input_ids.numpy()[0] ) else: __magic_name__ :Dict = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.perceiver_tokenizer __magic_name__ :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __magic_name__ :Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , __lowerCAmelCase ) self.assertIn('''attention_mask''' , __lowerCAmelCase 
) self.assertNotIn('''decoder_input_ids''' , __lowerCAmelCase ) self.assertNotIn('''decoder_attention_mask''' , __lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :str = self.perceiver_tokenizer __magic_name__ :Optional[Any] = [ '''Summary of the text.''', '''Another summary.''', ] __magic_name__ :int = tokenizer( text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertEqual(3_2 , targets['''input_ids'''].shape[1] ) def A ( self ): """simple docstring""" # safety check on max_len default value so we are sure the test works __magic_name__ :List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test __magic_name__ :Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ :Union[str, Any] = tempfile.mkdtemp() __magic_name__ :Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running''' __magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) __magic_name__ :int = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) shutil.rmtree(__lowerCAmelCase ) __magic_name__ :int = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __magic_name__ :Union[str, Any] = tempfile.mkdtemp() __magic_name__ :Tuple = ''' He is very happy, UNwant\u00E9d,running''' 
tokenizer.add_tokens(['''bim''', '''bambam'''] ) __magic_name__ :Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __magic_name__ :List[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) __magic_name__ :int = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) __magic_name__ :Union[str, Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) __magic_name__ :Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __magic_name__ :Union[str, Any] = json.load(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __magic_name__ :Tuple = json.load(__lowerCAmelCase ) __magic_name__ :str = [F'''<extra_id_{i}>''' for i in range(1_2_5 )] __magic_name__ :List[str] = added_tokens_extra_ids + [ '''an_additional_special_token''' ] __magic_name__ :Tuple = added_tokens_extra_ids + [ 
'''an_additional_special_token''' ] with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __magic_name__ :Optional[int] = tokenizer_class.from_pretrained( __lowerCAmelCase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __magic_name__ :Optional[Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowerCAmelCase )] __magic_name__ :Any = tokenizer_class.from_pretrained( __lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' ) def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" pass def A ( self ): """simple docstring""" # 
The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens __magic_name__ :List[Any] = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __magic_name__ :Any = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] __magic_name__ :Union[str, Any] = tokenizer.convert_tokens_to_string(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class A_ (a_ ): """simple docstring""" a__ = '''facebook/bart-large-mnli''' a__ = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) a__ = '''text_classifier''' a__ = AutoTokenizer a__ = AutoModelForSequenceClassification a__ = ['''text''', ['''text''']] a__ = ['''text'''] def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' super().setup() snake_case_ : Optional[int] = self.model.config snake_case_ : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): snake_case_ : Union[str, Any] = int(lowerCAmelCase__ ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." 
) def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int: '''simple docstring''' snake_case_ : Tuple = labels return self.pre_processor( [text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , ) def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = outputs.logits snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
653
0
__snake_case = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __snake_case = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __snake_case = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def _A ( _lowercase , _lowercase , _lowercase ) -> str: """simple docstring""" assert len(str(_lowercase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __UpperCamelCase = year // 1_00 __UpperCamelCase = (5 * (century % 4) + 2) % 7 __UpperCamelCase = year % 1_00 __UpperCamelCase = centurian % 12 __UpperCamelCase = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __UpperCamelCase = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) __UpperCamelCase = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''ViTFeatureExtractor'''] __lowerCamelCase : Any = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): 
raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
0
import os def SCREAMING_SNAKE_CASE_ ( _snake_case :int ) -> List[Any]: _A = len(grid[0] ) _A = len(_snake_case ) _A = 0 _A = 0 _A = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(_snake_case ): for j in range(n_rows - 3 ): _A = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] _A = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: _A = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: _A = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) _A = max( _snake_case , _snake_case , _snake_case , _snake_case ) if max_product > largest: _A = max_product return largest def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _A = [] with open(os.path.dirname(_snake_case ) + '''/grid.txt''' ) as file: for line in file: grid.append(line.strip('''\n''' ).split(''' ''' ) ) _A = [[int(_snake_case ) for i in grid[j]] for j in range(len(_snake_case ) )] return largest_product(_snake_case ) if __name__ == "__main__": print(solution())
2
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ 
:Any=1_000 , ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Optional[int] = num_channels snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Union[str, Any] = text_seq_length snake_case_ : Dict = is_training snake_case_ : Optional[Any] = use_input_mask snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Dict = use_labels snake_case_ : List[str] = vocab_size snake_case_ : Optional[Any] = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : str = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : List[Any] = type_vocab_size snake_case_ : Union[str, Any] = type_sequence_label_size snake_case_ : List[Any] = initializer_range snake_case_ : Union[str, Any] = coordinate_size snake_case_ : int = shape_size snake_case_ : Tuple = num_labels snake_case_ : List[Any] = num_choices snake_case_ : List[str] = scope snake_case_ : Dict = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) snake_case_ : str = text_seq_length snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1 snake_case_ : str = self.text_seq_length + self.image_seq_length def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ : Optional[Any] = bbox[i, j, 3] snake_case_ : Any = bbox[i, j, 1] snake_case_ : Tuple = t if bbox[i, j, 2] < 
bbox[i, j, 0]: snake_case_ : str = bbox[i, j, 2] snake_case_ : Dict = bbox[i, j, 0] snake_case_ : Union[str, Any] = t snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Dict = None if self.use_input_mask: snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] ) snake_case_ : Any = None if self.use_token_type_ids: snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) snake_case_ : Union[str, Any] = None snake_case_ : str = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) snake_case_ : str = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # text + image snake_case_ : Tuple = model(lowerCAmelCase__ , 
pixel_values=lowerCAmelCase__ ) snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only snake_case_ : List[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]: '''simple docstring''' snake_case_ : str = self.num_labels snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str: '''simple docstring''' 
snake_case_ : Optional[int] = self.num_labels snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : Optional[Any] = config_and_inputs snake_case_ : Tuple = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = False a__ = False a__ = False a__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, 
LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) a__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]: '''simple docstring''' return True def _A ( self :List[Any] ) -> str: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModelTester(self ) snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any: '''simple docstring''' snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ ) if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Optional[Any] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in get_values(lowerCAmelCase__ ): snake_case_ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) snake_case_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , ) return 
inputs_dict def _A ( self :Any ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :int ) -> int: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ : int = type self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :int ) -> str: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ ) @slow def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class A_ (unittest.TestCase ): """simple docstring""" @cached_property def _A ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None @slow def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = 
LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = self.default_image_processor snake_case_ : Optional[int] = prepare_img() snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ ) snake_case_ : List[str] = torch.tensor([[1, 2]] ) snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass snake_case_ : Any = model( input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , ) # verify the logits snake_case_ : Optional[Any] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
653
0
'''simple docstring'''
# Conversion table: each unit's magnitude expressed in joules.  A value is
# converted by scaling it into joules (``* factor[from_type]``) and then into
# the target unit (``/ factor[to_type]``).
lowerCAmelCase: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 10_00,
    "megajoule": 1_00_00_00,
    "gigajoule": 10_00_00_00_00,
    "wattsecond": 1.0,
    "watthour": 36_00,
    "kilowatthour": 3_60_00_00,
    "newtonmeter": 1.0,
    "calorie_nutr": 41_86.8,
    "kilocalorie_nutr": 4_18_68_00.00,
    "electronvolt": 1.6_0217_6634e-19,
    "britishthermalunit_it": 10_55.0_55_85,
    "footpound": 1.355818,
}
# Bug fix: the function below reads ``ENERGY_CONVERSION`` but the table was only
# ever bound to ``lowerCAmelCase`` (NameError at call time).  Alias it under the
# name the function uses; ``lowerCAmelCase`` is kept for backward compatibility.
ENERGY_CONVERSION = lowerCAmelCase


def A_(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from unit ``from_type`` to unit ``to_type``.

    Bug fixes versus the original:
    * the three parameters were all named ``A`` (a SyntaxError) while the body
      read ``from_type``/``to_type``/``value`` -- parameters now match the body;
    * the error message joined the numeric value instead of the valid unit
      names, and ``raise ValueError(A)`` raised the number, not the message.

    Raises:
        ValueError: if either unit name is not a key of ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        # Iterating the dict yields its keys, i.e. the valid unit names.
        UpperCamelCase = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            f'''Valid values are: {", ".join(ENERGY_CONVERSION)}'''
        )
        raise ValueError(UpperCamelCase)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
3
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


# NOTE(review): this module has been through an identifier-mangling pass --
# every parameter became ``__magic_name__`` and every local ``snake_case_`` --
# so several names below are read without ever being bound.
def __UpperCAmelCase ( __magic_name__ )-> int:  # picklable for multiprocessing
    """Add one to the argument (kept at module level so multiprocessing can pickle it).

    NOTE(review): the body returns ``i + 1`` but the parameter is named
    ``__magic_name__`` -- ``i`` is unbound here; looks like a mangled rename.
    """
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> None:
    """Check the joblib-spark backend registers under ``parallel_backend`` and
    that ``map_nested`` rejects an unsupported backend name for any num_proc.

    NOTE(review): the parameter list is empty, so every ``__magic_name__``
    below is unbound; ``pytest.raises(__magic_name__ )`` presumably expected an
    exception type (e.g. ``ValueError``) before the renaming pass -- confirm
    against the upstream test.
    """
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    # Input list for map_nested.
    snake_case_ : str = [1, 2, 3]
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> None:
    """Run ``map_nested`` under the spark backend over several container shapes
    (list, flat dict, dict of lists, nested dict) and compare to expected output.

    NOTE(review): every local is named ``snake_case_``, so each assignment
    shadows the previous one and ``expected_map_nested_sa`` is unbound -- the
    distinct input/expected variable names were lost in the renaming pass.
    """
    # Inputs (first five assignments) and their +1-mapped expectations (last five).
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
    snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
    snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
    snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
    snake_case_ : Tuple = [2, 3]
    snake_case_ : str = {"a": 2, "b": 3}
    snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
    snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
    snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
653
0
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class a ( nn.Module ): def __init__( self , _snake_case , _snake_case ): """simple docstring""" super().__init__() lowerCAmelCase = module lowerCAmelCase = nn.Sequential( nn.Linear(module.in_features , _snake_case , bias=_snake_case ) , nn.Linear(_snake_case , module.out_features , bias=_snake_case ) , ) lowerCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_snake_case ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCamelCase__ ( self , _snake_case , *_snake_case , **_snake_case ): """simple docstring""" return self.module(_snake_case , *_snake_case , **_snake_case ) + self.adapter(_snake_case ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class a ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module snake_case__ = '''bigscience/bloom-1b7''' # Constant values snake_case__ = 2.1_09_65_95_52_69_25_74 snake_case__ = '''Hello my name is''' snake_case__ = set() EXPECTED_OUTPUTS.add('''Hello my name is John 
and I am a professional photographer. I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = AutoTokenizer.from_pretrained(self.model_name ) class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() # Models and tokenizer lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' ) def UpperCamelCase__ ( self ): """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_abit.config self.assertTrue(hasattr(_snake_case , 'quantization_config' ) ) lowerCAmelCase = config.to_dict() lowerCAmelCase = config.to_diff_dict() lowerCAmelCase = config.to_json_string() def UpperCamelCase__ ( self ): """simple docstring""" from bitsandbytes.nn import Paramsabit lowerCAmelCase = self.model_fpaa.get_memory_footprint() lowerCAmelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowerCAmelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCamelCase__ ( self ): """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_snake_case , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCamelCase__ ( self ): """simple 
docstring""" lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ) lowerCAmelCase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BitsAndBytesConfig() lowerCAmelCase = True lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_snake_case , device_map='auto' ) lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ) lowerCAmelCase = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(_snake_case ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BitsAndBytesConfig() with self.assertRaises(_snake_case ): lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_snake_case , load_in_abit=_snake_case , device_map='auto' , bnb_abit_quant_type='nf4' , ) def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(_snake_case ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(_snake_case ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_snake_case ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_snake_case ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_snake_case ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ) lowerCAmelCase = 
self.model_fpaa.to(torch.floataa ) lowerCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowerCAmelCase = self.model_fpaa.to('cpu' ) # Check this does not throw an error lowerCAmelCase = self.model_fpaa.half() # Check this does not throw an error lowerCAmelCase = self.model_fpaa.float() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_snake_case , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class a ( unittest.TestCase ): @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" lowerCAmelCase = 't5-small' lowerCAmelCase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense lowerCAmelCase = AutoTokenizer.from_pretrained(cls.model_name ) lowerCAmelCase = 'Translate in German: Hello, my dog is cute' def UpperCamelCase__ ( self ): """simple docstring""" gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" from transformers import TaForConditionalGeneration lowerCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules lowerCAmelCase = None # test with `t5-small` lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' ) lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowerCAmelCase = model.generate(**_snake_case ) # test with `flan-t5-small` lowerCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' ) lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowerCAmelCase = model.generate(**_snake_case ) lowerCAmelCase = modules def UpperCamelCase__ ( self ): """simple 
docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowerCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowerCAmelCase = model.generate(**_snake_case ) # test with `flan-t5-small` lowerCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_snake_case , device_map='auto' ) lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowerCAmelCase = model.generate(**_snake_case ) class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() # model_name lowerCAmelCase = 'bigscience/bloom-560m' lowerCAmelCase = 't5-small' # Different types of model lowerCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' ) # Sequence classification model lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_snake_case , device_map='auto' ) # CausalLM model lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_snake_case , device_map='auto' ) # Seq2seq model lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_snake_case , device_map='auto' ) def UpperCamelCase__ ( self ): """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter 
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() def UpperCamelCase__ ( self ): """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowerCAmelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_snake_case , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowerCAmelCase = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch lowerCAmelCase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_snake_case ) , self.EXPECTED_OUTPUTS ) class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'facebook/opt-350m' super().setUp() def UpperCamelCase__ ( self ): """simple docstring""" if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters lowerCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , 
load_in_abit=_snake_case ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowerCAmelCase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability lowerCAmelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_snake_case ) ): lowerCAmelCase = LoRALayer(module.q_proj , rank=16 ) lowerCAmelCase = LoRALayer(module.k_proj , rank=16 ) lowerCAmelCase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowerCAmelCase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowerCAmelCase = model.forward(**_snake_case ) out.logits.norm().backward() for module in model.modules(): if isinstance(_snake_case , _snake_case ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_snake_case , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class a ( a__ ): snake_case__ = '''gpt2-xl''' snake_case__ = 3.31_91_85_48_54_15_21_87
4
'''Configuration for ESM models plus the ESMFold trunk / structure-module
sub-configurations.

NOTE(review): this file has been through an identifier-mangling pass.  The
``__init__`` below gives every parameter the same name (a SyntaxError), every
local/attribute assignment targets ``snake_case_`` or ``a__`` (so the dataclass
"fields" all rebind one class attribute and carry no annotations, meaning
``@dataclass`` records no fields), and several names (``vocab_size``,
``output``, ``sequence_num_heads`` ...) are read without ever being bound.
The comments document intent; the original names must be restored to run.
'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): ``Dict``/``int`` annotations below are wrong and ``Dict`` is
# not even imported -- module-level annotations are evaluated at runtime.
__lowerCamelCase : Dict = logging.get_logger(__name__)

# TODO Update this
# Checkpoint name -> config URL.
__lowerCamelCase : int = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class A_ (a_ ):
    """ESM model configuration; ``model_type`` is ``esm``.

    NOTE(review): the base class name ``a_`` is unbound -- presumably
    ``PretrainedConfig`` (imported above) before the renaming pass.
    """

    a__ = '''esm'''

    # NOTE(review): every parameter below shares one name -- this is a
    # SyntaxError; the defaults suggest (vocab_size, mask_token_id,
    # pad_token_id, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
    # intermediate_size=3072, dropouts, max_position_embeddings=1026, ...).
    def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None ,
                  lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 ,
                  lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 ,
                  lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 ,
                  lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 ,
                  lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" ,
                  lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None ,
                  lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False ,
                  lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None ,
                  **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
        '''Store the standard transformer hyper-parameters and, for folding
        models, resolve ``esmfold_config``/``vocab_list`` (defaulting them with
        a log message when absent).'''
        super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
        # NOTE(review): the right-hand names here are unbound (they were the
        # original parameter names); targets should be ``self.<name>``.
        snake_case_ : str = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Tuple = attention_probs_dropout_prob
        snake_case_ : List[Any] = max_position_embeddings
        snake_case_ : str = initializer_range
        snake_case_ : List[Any] = layer_norm_eps
        snake_case_ : str = position_embedding_type
        snake_case_ : Optional[int] = use_cache
        snake_case_ : str = emb_layer_norm_before
        snake_case_ : List[Any] = token_dropout
        snake_case_ : str = is_folding_model
        if is_folding_model:
            # Folding models need an ESMFold sub-config; default it when absent.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                snake_case_ : Optional[Any] = EsmFoldConfig()
            elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                # Accept a plain dict and promote it to an EsmFoldConfig.
                snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
            snake_case_ : Optional[Any] = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                snake_case_ : List[str] = get_default_vocab_list()
            else:
                snake_case_ : List[str] = vocab_list
        else:
            # Non-folding models carry no folding sub-config or vocab list.
            snake_case_ : List[Any] = None
            snake_case_ : int = None
        # use_esm_attn_map is not supported by the HF port -- reject it early.
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def _A ( self :Optional[int] ) -> List[Any]:
        '''Serialize to a dict, expanding the nested ``esmfold_config``.

        NOTE(review): ``output`` is unbound -- ``super().to_dict()`` was
        presumably assigned to it before the renaming pass.
        '''
        snake_case_ : Any = super().to_dict()
        if isinstance(self.esmfold_config , lowerCAmelCase__ ):
            snake_case_ : Optional[int] = self.esmfold_config.to_dict()
        return output


@dataclass
class A_ :
    """ESMFold-level options (trunk config plus scalar switches).

    NOTE(review): all ten class attributes rebind ``a__`` and carry no
    annotations, so ``@dataclass`` sees no fields; the post-init below reads
    ``self.trunk``, which is never defined here.
    """
    a__ = None
    a__ = True
    a__ = False
    a__ = False
    a__ = False
    a__ = 0
    a__ = True
    a__ = False
    a__ = 128
    a__ = None

    def _A ( self :Dict ) -> int:
        '''Promote a ``None``/dict ``trunk`` value to a ``TrunkConfig``.'''
        if self.trunk is None:
            snake_case_ : Dict = TrunkConfig()
        elif isinstance(self.trunk , lowerCAmelCase__ ):
            snake_case_ : int = TrunkConfig(**self.trunk )

    def _A ( self :Optional[int] ) -> Union[str, Any]:
        '''Serialize to a dict, expanding the nested trunk config.'''
        snake_case_ : Tuple = asdict(self )
        snake_case_ : Optional[int] = self.trunk.to_dict()
        return output


@dataclass
class A_ :
    """Trunk (Evoformer-style) configuration with validation.

    NOTE(review): same ``a__`` field mangling as above; the validator also
    reads ``self.sequence_state_dim`` etc., which are never defined here.
    """
    a__ = 48
    a__ = 1024
    a__ = 128
    a__ = 32
    a__ = 32
    a__ = 32
    a__ = 0
    a__ = 0
    a__ = False
    a__ = 4
    a__ = 128
    a__ = None

    def _A ( self :List[Any] ) -> Union[str, Any]:
        '''Normalize ``structure_module`` and validate dimension relationships.'''
        if self.structure_module is None:
            snake_case_ : Optional[int] = StructureModuleConfig()
        elif isinstance(self.structure_module , lowerCAmelCase__ ):
            snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): BUG -- ``x % x`` is always 0, so the next two checks
        # never fire.  The error text suggests the intended divisors were
        # ``sequence_head_width`` / ``pairwise_head_width`` respectively.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        # Derived head counts; NOTE(review): ``sequence_num_heads`` /
        # ``pairwise_num_heads`` read below were presumably these two locals
        # before the renaming pass.
        snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
        snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def _A ( self :Tuple ) -> List[str]:
        '''Serialize to a dict, expanding the nested structure-module config.'''
        snake_case_ : int = asdict(self )
        snake_case_ : Dict = self.structure_module.to_dict()
        return output


@dataclass
class A_ :
    """Structure-module (IPA head) hyper-parameters.  Plain value container;
    same ``a__`` field mangling as the other dataclasses above."""
    a__ = 384
    a__ = 128
    a__ = 16
    a__ = 128
    a__ = 12
    a__ = 4
    a__ = 8
    a__ = 0.1
    a__ = 8
    a__ = 1
    a__ = 2
    a__ = 7
    a__ = 10
    a__ = 1E-8
    a__ = 1E5

    def _A ( self :Dict ) -> Dict:
        '''Serialize all fields to a plain dict.'''
        return asdict(self )


def __UpperCAmelCase ( )-> int:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    NOTE(review): the ``-> int`` annotation is wrong -- the function returns a
    tuple of strings.
    """
    return (
        "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R",
        "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X",
        "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>",
    )
653
0
'''Tests for feature-extractor Hub integration: offline/cache fallback on HTTP
errors, and push_to_hub round-trips (user repo, org repo, dynamic custom class).

NOTE(review): this module is identifier-mangled -- every local is
``_lowerCAmelCase`` (each assignment shadows the last) and several reads go
through the module-level ``_lowercase`` or unbound names
(``feature_extractor``, ``new_feature_extractor``); the original variable
names must be restored for the tests to actually run.
'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / """utils"""))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# Path to the local test fixtures directory.
_lowercase = get_tests_dir("""fixtures""")


class UpperCAmelCase_ ( unittest.TestCase ):
    '''Offline-behavior tests for feature-extractor loading.'''

    def _lowercase ( self ):
        """Cached model should still load when the Hub returns HTTP 500.

        NOTE(review): the five assignments below all rebind ``_lowerCAmelCase``;
        they were presumably building a fake response object
        (status_code=500, headers={}, raise_for_status=HTTPError, json={}).
        """
        _lowerCAmelCase = mock.Mock()
        _lowerCAmelCase = 500
        _lowerCAmelCase = {}
        _lowerCAmelCase = HTTPError
        _lowerCAmelCase = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=_lowercase ) as mock_head:
            _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _lowercase ( self ):
        """Loading directly from a full config-file URL should work."""
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )


@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
    '''push_to_hub round-trip tests against the staging Hub.'''

    @classmethod
    def _lowercase ( cls ):
        """Authenticate against the staging Hub for the whole class."""
        _lowerCAmelCase = TOKEN
        HfFolder.save_token(_lowercase )

    @classmethod
    def _lowercase ( cls ):
        """Best-effort cleanup of every repo the tests may have created."""
        try:
            delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
        except HTTPError:
            pass

    def _lowercase ( self ):
        """Round-trip a feature extractor through a user repo, via both
        ``push_to_hub`` and ``save_pretrained(..., push_to_hub=True)``, and
        check every attribute survives."""
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
        feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                _lowercase , repo_id="""test-feature-extractor""" , push_to_hub=_lowercase , use_auth_token=self._token )
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )

    def _lowercase ( self ):
        """Same round-trip as above, but into an organization repo."""
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
        feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                _lowercase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=_lowercase , use_auth_token=self._token )
        _lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )

    def _lowercase ( self ):
        """Push a custom feature-extractor class and reload it dynamically via
        AutoFeatureExtractor with ``trust_remote_code``."""
        CustomFeatureExtractor.register_for_auto_class()
        _lowerCAmelCase = CustomFeatureExtractor.from_pretrained(_lowercase )
        feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
        _lowerCAmelCase = AutoFeatureExtractor.from_pretrained(
            F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=_lowercase )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
5
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
0
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :List[str] , **__A :Dict ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Dict , **__A :Union[str, Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :str ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :List[str] , **__A :str ) -> List[str]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :Tuple , **__A :Optional[int] ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :Optional[Any] , **__A :Any ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :List[Any] , *__A :Any , **__A :List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :Optional[Any] , **__A :List[Any] ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :List[str] , **__A :int ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :Tuple , **__A :str ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :List[str] , **__A :Optional[Any] ) -> Dict: """simple docstring""" requires_backends(cls , 
["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Any , **__A :List[Any] ) -> Dict: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :List[Any] , **__A :int ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Union[str, Any] , *__A :Dict , **__A :str ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :Tuple , **__A :List[str] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Optional[Any] , *__A :Optional[int] , **__A :Union[str, Any] ) -> int: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Any , **__A :Union[str, Any] ) -> Any: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Optional[Any] , *__A :List[Any] , **__A :Dict ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :int , **__A :str ) -> List[str]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :str , *__A :int , **__A :Any ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :List[Any] , **__A :Dict ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :Optional[Any] , **__A :str ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :int 
, *__A :str , **__A :int ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :Union[str, Any] , **__A :int ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Optional[Any] , *__A :Optional[Any] , **__A :Optional[Any] ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Tuple , *__A :List[str] , **__A :int ) -> List[str]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :List[Any] , **__A :Any ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Dict , *__A :Optional[Any] , **__A :List[str] ) -> Dict: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Any , *__A :List[Any] , **__A :Dict ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Optional[int] , **__A :List[str] ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Any , *__A :Optional[Any] , **__A :List[Any] ) -> Tuple: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Dict , *__A :Optional[Any] , **__A :Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :Union[str, Any] , *__A :str , **__A :str ) -> int: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :int , *__A :List[str] , **__A 
:Optional[Any] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :List[Any] , *__A :Optional[Any] , **__A :Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :List[str] , *__A :Optional[Any] , **__A :Any ) -> Tuple: """simple docstring""" requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=UpperCamelCase__ ): lowerCamelCase_ = ["flax"] def __init__( self :Tuple , *__A :Dict , **__A :List[Any] ) -> str: """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def _snake_case ( cls :Optional[int] , *__A :Any , **__A :int ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def _snake_case ( cls :int , *__A :Tuple , **__A :List[str] ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""flax"""] )
6
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCamelCase : Optional[int] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict: '''simple docstring''' snake_case_ : List[str] = d_model snake_case_ : Dict = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[Any] = prediction_length snake_case_ : str = context_length snake_case_ : Tuple = cardinality snake_case_ : List[str] = num_time_features snake_case_ : Optional[Any] = lags_sequence snake_case_ : Union[str, Any] = embedding_dimension snake_case_ : Optional[Any] = is_training snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : int = 
intermediate_size snake_case_ : Any = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = context_length snake_case_ : Any = prediction_length + label_length snake_case_ : Union[str, Any] = label_length snake_case_ : List[Any] = moving_average snake_case_ : str = autocorrelation_factor def _A ( self :List[Any] ) -> Any: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = config.context_length + max(config.lags_sequence ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] ) snake_case_ : int = { "past_values": past_values, "static_categorical_features": 
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _A ( self :Dict ) -> Tuple: '''simple docstring''' snake_case_ : str = self.get_config() snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ ) return config, inputs_dict def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() snake_case_ : Optional[int] = model(**lowerCAmelCase__ ) snake_case_ : Any = outputs.encoder_last_hidden_state snake_case_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ ) snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) snake_case_ : List[Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) snake_case_ : Any = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) snake_case_ : List[str] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) snake_case_ : Optional[Any] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) snake_case_ : Any = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = model.get_decoder() decoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Tuple = decoder( trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a__ = (AutoformerForPrediction,) if is_torch_available() else () a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = AutoformerModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _A ( self :List[str] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ : List[Any] = 
model_class(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _A ( self :str ) -> str: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) ) # The main input is the name of the argument after `self` snake_case_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[Any] = [*signature.parameters.keys()] snake_case_ : Dict = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCAmelCase__ 
)] , lowerCAmelCase__ ) def _A ( self :int ) -> Any: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = True snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ ) snake_case_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: snake_case_ : Any = True snake_case_ : Any = False snake_case_ : Dict = True snake_case_ : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.encoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) snake_case_ : Tuple = len(lowerCAmelCase__ ) snake_case_ : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: 
correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # decoder attentions snake_case_ : Optional[int] = outputs.decoder_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions snake_case_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine snake_case_ : Optional[int] = True snake_case_ : List[Any] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) ) snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _A ( self :Any ) -> Optional[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int: """simple docstring""" snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" 
) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ ) return batch @require_torch @slow class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : List[str] = prepare_batch() with torch.no_grad(): snake_case_ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] snake_case_ : Optional[int] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Tuple = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 
1.2_9_7_0]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : str = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Optional[Any] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ ) snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
653
0
"""simple docstring""" def _snake_case ( _snake_case : list ) -> list: '''simple docstring''' for i in range(len(_snake_case ) - 1 , 0 , -1 ): _A = False for j in range(_snake_case , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _A , _A = unsorted[j - 1], unsorted[j] _A = True for j in range(_snake_case ): if unsorted[j] > unsorted[j + 1]: _A , _A = unsorted[j + 1], unsorted[j] _A = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() a = input('''Enter numbers separated by a comma:\n''').strip() a = [int(item) for item in user_input.split(''',''')] print(F'''{cocktail_shaker_sort(unsorted) = }''')
7
"""Unit tests for the RoBERTa slow and fast tokenizers (byte-level BPE)."""
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

# NOTE(review): identifiers in this chunk look machine-mangled — every local is
# `snake_case_`, every parameter is `lowerCAmelCase__`, every test method is `_A`
# (so later defs shadow earlier ones), and the mixin base is `a_`.  Many names
# below will not resolve at runtime; restore the original names before use.


@require_tokenizers
class A_ (a_ , unittest.TestCase ):
    """Test suite exercising RobertaTokenizer/RobertaTokenizerFast against a tiny on-disk vocab."""

    a__ = RobertaTokenizer
    a__ = RobertaTokenizerFast
    a__ = True
    a__ = {'''cls_token''': '''<s>'''}

    def _A ( self :Optional[int] ) -> List[Any]:
        """Write a minimal BPE vocab and merges file into the test tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        # "\u0120" is the byte-level BPE marker for a leading space.
        snake_case_ : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # token -> id mapping, ids assigned by position in the list above
        snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case_ : int = {"unk_token": "<unk>"}
        snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )

    def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
        """Build a slow tokenizer from the tmpdir files (special tokens merged into kwargs)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )

    def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
        """Build the fast (Rust-backed) tokenizer from the same tmpdir files."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )

    def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
        """Return a (input_text, expected_output_text) pair for the common round-trip tests."""
        snake_case_ : int = "lower newer"
        snake_case_ : Tuple = "lower newer"
        return input_text, output_text

    def _A ( self :Tuple ) -> Union[str, Any]:
        """Tokenize with the tiny vocab and check tokens and ids exactly."""
        snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ : Dict = "lower newer"
        snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ )  # , add_prefix_space=True)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokens + [tokenizer.unk_token]
        snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )

    def _A ( self :Any ) -> str:
        """Spot-check known roberta-base ids for two reference sentences."""
        snake_case_ : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    @slow
    def _A ( self :str ) -> List[str]:
        """build_inputs_with_special_tokens must agree with encode(add_special_tokens=True)."""
        snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
        snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(
            "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
        snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def _A ( self :List[Any] ) -> Any:
        """Check add_prefix_space handling and space behaviour around added special tokens."""
        snake_case_ : Optional[Any] = self.get_tokenizer()
        snake_case_ : Tuple = "Encode this sequence."
        # byte-level BPE representation of a literal space byte
        snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]

        # Testing encoder arguments
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        # Testing spaces after special tokens
        snake_case_ : List[Any] = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} )  # mask token has a left space
        snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )

        snake_case_ : List[str] = "Encode <mask> sequence"
        snake_case_ : List[Any] = "Encode <mask>sequence"

        snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : int = encoded.index(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    def _A ( self :Tuple ) -> Tuple:
        """Intentionally skipped in this suite."""
        pass

    def _A ( self :int ) -> Optional[Any]:
        """Compare slow vs fast tokenizer outputs on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : Any = "A, <mask> AllenNLP sentence."
                snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )

                snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )

                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )

    def _A ( self :int ) -> Tuple:
        """The pre-tokenizer/post-processor state must reflect the constructor flags."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
            snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )

    def _A ( self :List[str] ) -> List[Any]:
        """Check offset mappings for every (add_prefix_space, trim_offsets) combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : str = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''

                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : Tuple = F''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )

                snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
653
0
"""Surface area and volume of a regular dodecahedron."""


def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge.

    Formula: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2

    :param edge: positive edge length (int or float)
    :raises ValueError: if ``edge`` is not a positive number

    >>> dodecahedron_surface_area(5)
    516.1432201766901
    """
    # Validate the type before comparing: the original evaluated `edge <= 0`
    # first (TypeError for non-numeric input) and then called
    # `isinstance(edge, edge)`, which raised TypeError for EVERY valid edge
    # because the second argument must be a type, not the value itself.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge.

    Formula: ((15 + 7 * sqrt(5)) / 4) * edge**3

    :param edge: positive edge length (int or float)
    :raises ValueError: if ``edge`` is not a positive number

    >>> dodecahedron_volume(5)
    957.8898700780791
    """
    # Same validation fix as dodecahedron_surface_area.  The two functions
    # also previously shared one mangled name (`_lowerCAmelCase`), so the
    # first definition was shadowed and unreachable; both are now named.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
8
"""Project Euler 'perfect partitions' search.

Finds the smallest partition value at which the proportion of perfect
partitions drops below a given threshold.
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the candidate corresponds to a "perfect" partition.

    A candidate ``k`` is perfect here when ``sqrt(4*k + 1) / 2 + 1/2`` is an
    exact power of two, i.e. its base-2 logarithm is an integer.

    The original code called ``math.loga`` (a mangled, non-existent name for
    ``math.log2``) and compared the exponent against ``int(positive_integer)``
    instead of ``int(exponent)``.

    >>> check_partition_perfect(2)
    True
    >>> check_partition_perfect(3)
    False
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest partition value where perfect/total < ``max_proportion``.

    Iterates candidates ``(integer**2 - 1) / 4`` for integer = 3, 4, ...;
    only integral candidates count as partitions.

    :param max_proportion: threshold for perfect_partitions / total_partitions
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f'''{solution() = }''')
653
0
"""Deduplicate identical initializer tensors in an ONNX model to shrink its size."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names.

    Temporarily blanks both names so the protobuf `==` compares only the
    tensor payload, then restores them.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of `node_proto` equal to `name` with `new_name`.

    Recurses into the subgraphs of If/Loop control-flow nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated proto fields have no item assignment; insert then pop
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply `_node_replace_input_with` to every node of the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop each duplicate initializer `i` and rewire its users to `ref_i`.

    `ind_to_replace` holds (i, ref_i) index pairs with i > ref_i.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX file, merge byte-identical initializers, save `optimized_<name>`.

    :param onnx_file_path: path of the model file to optimize
    :return: path of the optimized model written next to the input file

    NOTE(review): in the original chunk all five functions were mangled to the
    single name ``A`` while the call sites used the private names restored
    here, so the module raised NameError; the names are reconstructed.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # estimate saved bytes from the ONNX data_type code
                # (1=float32, 6=int32 -> 4 bytes; 7=int64, 11=double -> 8 bytes)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model_file_name)
    onnx.save(model, new_model)
    return new_model
9
"""Convert timm ResNet checkpoints into HuggingFace ResNetForImageClassification weights."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging

# NOTE(review): identifiers in this chunk look machine-mangled (classes `A_`,
# params `lowerCAmelCase__`, locals `snake_case_`, `nn.Convad` for nn.Conv2d,
# field factories `a_`); many references will not resolve — restore original
# names (Tracker / ModuleTransfer / convert_weight_and_push / ...) before use.

logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()


@dataclass
class A_ :
    """Records the leaf modules (no submodules, or Conv/BatchNorm) hit during a forward pass."""
    a__ = 42
    a__ = field(default_factory=a_ )
    a__ = field(default_factory=a_ )

    def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
        """Forward hook: append the module when it is a leaf worth tracing."""
        snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowerCAmelCase__ )

    def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
        """Register hooks on every submodule, run one forward pass, then remove the hooks."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowerCAmelCase__ )
        [x.remove() for x in self.handles]
        return self

    @property
    def _A ( self :int ) -> List[Any]:
        """Traced modules that actually carry parameters (non-empty state_dict)."""
        return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class A_ :
    """Copies parameters layer-by-layer from a source module into a destination module."""
    a__ = 42
    a__ = 42
    a__ = 0
    a__ = field(default_factory=a_ )
    a__ = field(default_factory=a_ )

    def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
        """Trace src and dest with the same input and transfer state_dicts pairwise.

        Raises when the two traces expose a different number of parameterized
        operations (architectures do not line up).
        """
        snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
        snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
        # drop operation types the caller asked to skip on either side
        snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
        snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
        if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
                F''' destination module has {len(lowerCAmelCase__ )}.''' )
        for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
    """Convert one timm checkpoint, verify logits match, and optionally push to the Hub."""
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
        snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
        snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
        # single dummy ImageNet-sized batch used both to trace and to compare outputs
        snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
        module_transfer(__magic_name__ )
    assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
    snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(__magic_name__ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
        # we can use the convnext one
        snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
        print(F'''Pushed {checkpoint_name}''' )


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
    """Build ImageNet-1k label maps and per-architecture configs, then convert one or all models."""
    snake_case_ : List[str] = "imagenet-1k-id2label.json"
    snake_case_ : Optional[Any] = 1000
    snake_case_ : List[Any] = (1, num_labels)
    snake_case_ : Optional[Any] = "huggingface/label-files"
    snake_case_ : Dict = num_labels
    # id2label is published as a dataset file on the Hub
    snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
    snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
    snake_case_ : Any = idalabel
    snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
    # pre-bind the label maps so each entry below only specifies architecture
    snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
    snake_case_ : Optional[int] = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
    }
    if model_name:
        convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
    return config, expected_shape


if __name__ == "__main__":
    __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    __lowerCamelCase : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
653
0
"""Project Euler problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2).

    :param n: how many natural numbers to include (default 100, per the problem)

    >>> solution(10)
    2640

    The original chunk defined this function under the mangled name
    ``_snake_case`` while the ``__main__`` guard called ``solution()``
    (NameError); it also squared the sum via ``int(math.pow(..., 2))``,
    which goes through float and loses precision for large ``n`` — plain
    integer ``** 2`` is exact, so the unused ``math`` import is dropped.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f'{solution() = }')
10
"""Configuration class for the RoCBert model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): identifiers here are machine-mangled — every __init__ parameter
# is literally `lowerCAmelCase__` (duplicate argument names are not valid
# Python) and the base class is `a_`; the assignment targets below preserve the
# intended attribute names.  Restore real parameter names before use.

__lowerCamelCase : List[Any] = logging.get_logger(__name__)
# map of pretrained checkpoint name -> hosted config.json
__lowerCamelCase : Dict = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}


class A_ (a_ ):
    """RoCBert configuration: BERT-style text settings plus pronunciation/shape embeddings."""
    a__ = '''roc_bert'''

    def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]:
        """Store every hyperparameter on the instance, then defer to PretrainedConfig.

        Defaults (in positional order) appear to follow the weiweishi/roc-bert-base-zh
        checkpoint: vocab 30522, hidden 768, 12 layers, 12 heads, etc. — TODO confirm.
        """
        # --- standard BERT-style text encoder settings ---
        snake_case_ : int = vocab_size
        snake_case_ : Dict = max_position_embeddings
        snake_case_ : int = hidden_size
        snake_case_ : str = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Optional[Any] = hidden_act
        snake_case_ : Optional[int] = hidden_dropout_prob
        snake_case_ : List[Any] = attention_probs_dropout_prob
        snake_case_ : Dict = initializer_range
        snake_case_ : str = type_vocab_size
        snake_case_ : Tuple = layer_norm_eps
        snake_case_ : Optional[Any] = use_cache
        # --- RoCBert-specific pronunciation / glyph-shape embedding settings ---
        snake_case_ : Optional[Any] = enable_pronunciation
        snake_case_ : List[Any] = enable_shape
        snake_case_ : Optional[int] = pronunciation_embed_dim
        snake_case_ : Dict = pronunciation_vocab_size
        snake_case_ : int = shape_embed_dim
        snake_case_ : Any = shape_vocab_size
        snake_case_ : Optional[int] = concat_input
        snake_case_ : List[Any] = position_embedding_type
        snake_case_ : Any = classifier_dropout
        super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
653
0
"""Configuration class for the UperNet semantic-segmentation model."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

# NOTE(review): parameters are machine-mangled — every __init__ argument is
# literally `A` (duplicate argument names are not valid Python) and the base
# class is `A`.  The assignment targets preserve the intended attribute names;
# restore real parameter names before use.  Also note the `[1, 2, 3, 6]`
# mutable default argument — replace with None-sentinel once names are fixed.

lowercase_ = logging.get_logger(__name__)


class __A ( A ):
    """UperNet configuration: a backbone config plus decode/auxiliary-head settings."""
    __lowerCamelCase : str = 'upernet'

    def __init__(self , A=None , A=512 , A=0.02 , A=[1, 2, 3, 6] , A=True , A=0.4 , A=384 , A=256 , A=1 , A=False , A=255 , **A , ) -> Optional[int]:
        """Normalize the backbone config (defaulting to ResNet) and store head settings."""
        super().__init__(**A )
        if backbone_config is None:
            # no backbone given: fall back to a default 4-stage ResNet
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            _a = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
        elif isinstance(A , A ):
            # backbone supplied as a plain dict: rebuild the proper config class from it
            _a = backbone_config.get('''model_type''' )
            _a = CONFIG_MAPPING[backbone_model_type]
            _a = config_class.from_dict(A )
        _a = backbone_config
        _a = hidden_size
        _a = initializer_range
        _a = pool_scales
        _a = use_auxiliary_head
        _a = auxiliary_loss_weight
        _a = auxiliary_in_channels
        _a = auxiliary_channels
        _a = auxiliary_num_convs
        _a = auxiliary_concat_input
        _a = loss_ignore_index

    def a__ (self ) -> Union[str, Any]:
        """Serialize to a dict, expanding the nested backbone config and adding model_type."""
        _a = copy.deepcopy(self.__dict__ )
        _a = self.backbone_config.to_dict()
        _a = self.__class__.model_type
        return output
11
"""Four implementations of the classic "largest all-ones square in a binary
matrix" problem, from plain recursion to O(cols)-space dynamic programming.

All four return the SIDE LENGTH of the largest square of 1s.

In the original chunk all four functions shared one mangled name
(``__UpperCAmelCase``) while ``__main__`` called
``largest_square_area_in_matrix_bottom_up`` (NameError); the names are
restored here.
"""


def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list) -> int:
    """Plain recursion (exponential time), kept for reference.

    >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[0, 0], [0, 0]])
    0
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # square ending at (row, col) extends the smallest neighbour square
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # one-element list so the nested function can mutate the running maximum
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list) -> int:
    """Memoised recursion: O(rows*cols) time, O(rows*cols) space.

    >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP over a full (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP keeping only two rows: O(cols) extra space.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 0]])
    1
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # BUGFIX: snapshot the finished row.  The original aliased
        # `next_row = current_row`, so writes to current_row during the next
        # (higher) row corrupted the "previous row" values read via next_row
        # (e.g. it returned 2 instead of 1 for [[1, 1], [1, 0]]).
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case(ProcessorMixin):
    """CLAP processor: wraps a ClapFeatureExtractor and a Roberta tokenizer
    into a single callable that prepares text and/or audio model inputs.
    """

    # Names resolved by ProcessorMixin when loading the sub-components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding (with ``input_features`` merged in
        when audio is also given) or a ``BatchEncoding`` of audio features.
        Raises ``ValueError`` when neither input is provided.
        """
        # ``sampling_rate`` is consumed here and forwarded explicitly below.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and
        # de-duplicated via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
12
"""Fetch and read report artifacts from the latest completed daily CI run."""

import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the latest ``num_runs`` scheduled runs of the daily CI workflow
    on ``main`` (GitHub Actions REST API)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run).
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results.
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_run(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into
    ``output_dir`` (best effort: missing artifacts are skipped)."""
    workflow_run_id = get_last_daily_ci_run(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) matches the helper's keyword name.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name,
                    artifact_url=artifact_url,
                    output_dir=output_dir,
                    token=token,
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts, then return a nested mapping
    ``{artifact_name: {member_filename: decoded_text}}`` for every artifact
    zip that was actually downloaded."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # Read each member as UTF-8 text.
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
653
0
'''simple docstring''' import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any: __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : int = batch_size __lowerCamelCase : Optional[int] = image_size __lowerCamelCase : Optional[int] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : List[Any] = depths __lowerCamelCase : int = num_heads __lowerCamelCase : Optional[Any] = window_size __lowerCamelCase : Optional[Any] = mlp_ratio __lowerCamelCase : 
List[str] = qkv_bias __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate __lowerCamelCase : Any = hidden_act __lowerCamelCase : Union[str, Any] = use_absolute_embeddings __lowerCamelCase : Any = patch_norm __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : Dict = is_training __lowerCamelCase : Optional[Any] = scope __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Dict = encoder_stride __lowerCamelCase : Union[str, Any] = out_features __lowerCamelCase : str = out_indices def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[str] = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Optional[int]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: __lowerCamelCase : Dict = 
MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: __lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = ['stem'] __lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase : int = False lowerCamelCase : int = False lowerCamelCase : 
str = False lowerCamelCase : int = False lowerCamelCase : Union[str, Any] = False def lowercase_ ( self ) -> Tuple: __lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def lowercase_ ( self ) -> int: pass def lowercase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ) -> Tuple: return def lowercase_ ( self ) -> Dict: __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> List[str]: __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('Swin does not use inputs_embeds' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('Swin does not support feedforward chunking' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) 
) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : str = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def lowercase_ ( self ) -> List[Any]: pass def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: __lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : int = outputs.hidden_states __lowerCamelCase : Tuple = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # Swin has a different seq_length __lowerCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__lowerCamelCase : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __lowerCamelCase : Dict = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Optional[int] = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Union[str, Any] = 3 __lowerCamelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __lowerCamelCase : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __lowerCamelCase : str = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : Tuple = True self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def lowercase_ ( self ) -> Optional[Any]: pass 
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def lowercase_ ( self ) -> Union[str, Any]: pass def lowercase_ ( self ) -> Tuple: __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Any = 0 return t def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ): with torch.no_grad(): __lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has' f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.' 
) , ) recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for model_class in self.all_model_classes: __lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) __lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase : List[str] = MaskFormerSwinConfig def lowercase_ ( self ) -> Tuple: __lowerCamelCase : List[str] = MaskFormerSwinModelTester(self ) def lowercase_ ( self ) -> Optional[Any]: 
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ ) backbone.to(SCREAMING_SNAKE_CASE_ ) backbone.eval() __lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(outputs.attentions )
13
'''simple docstring''' from string import ascii_uppercase __lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)} __lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase)) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Tuple = len(__magic_name__ ) snake_case_ : str = 0 while True: if x == i: snake_case_ : List[str] = 0 if len(__magic_name__ ) == len(__magic_name__ ): break key += key[i] i += 1 return key def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : str = "" snake_case_ : List[Any] = 0 for letter in message: if letter == " ": cipher_text += " " else: snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Dict = "" snake_case_ : Dict = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def __UpperCAmelCase ( )-> None: """simple docstring""" snake_case_ : List[str] = "THE GERMAN ATTACK" snake_case_ : List[str] = "SECRET" snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ ) snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ ) print(F'''Encrypted Text = {s}''' ) print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
653
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self , _a ) -> str: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): _a : Optional[Any] = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_a ) def __lowercase ( self ) -> Tuple: _a : Tuple = '''sshleifer/tiny-gpt2''' _a : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : int = PyTorchBenchmark(_a ) _a : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> str: _a : str = '''sgugger/tiny-distilbert-classification''' _a : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , ) _a : Any = PyTorchBenchmark(_a ) _a : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> int: _a : Optional[Any] = '''sshleifer/tiny-gpt2''' _a : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : int = PyTorchBenchmark(_a ) _a : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 
'''cpu''' , '''Cant do half precision''' ) def __lowercase ( self ) -> int: _a : Optional[int] = '''sshleifer/tiny-gpt2''' _a : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : int = PyTorchBenchmark(_a ) _a : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> List[Any]: _a : Union[str, Any] = '''sshleifer/tiny-gpt2''' _a : Dict = AutoConfig.from_pretrained(_a ) # set architectures equal to `None` _a : List[Any] = None _a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : List[Any] = PyTorchBenchmark(_a , configs=[config] ) _a : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> Dict: _a : Tuple = '''sshleifer/tiny-gpt2''' _a : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : Dict = PyTorchBenchmark(_a ) _a : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def __lowercase ( self ) -> Optional[Any]: _a : Any = '''sshleifer/tiny-gpt2''' _a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , ) _a : Tuple = PyTorchBenchmark(_a ) _a : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase ( self ) -> Any: _a : int 
= '''sshleifer/tiny-gpt2''' _a : List[str] = AutoConfig.from_pretrained(_a ) _a : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : Tuple = PyTorchBenchmark(_a , configs=[config] ) _a : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> Union[str, Any]: _a : Union[str, Any] = '''sshleifer/tinier_bart''' _a : Optional[int] = AutoConfig.from_pretrained(_a ) _a : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : List[str] = PyTorchBenchmark(_a , configs=[config] ) _a : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase ( self ) -> int: _a : Optional[int] = '''sshleifer/tiny-gpt2''' _a : int = AutoConfig.from_pretrained(_a ) _a : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : Tuple = PyTorchBenchmark(_a , configs=[config] ) _a : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase ( self ) -> List[Any]: _a : Dict = '''sshleifer/tinier_bart''' _a : Dict = AutoConfig.from_pretrained(_a ) _a : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) _a : int = PyTorchBenchmark(_a , configs=[config] ) _a : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase ( self ) -> List[str]: _a : 
List[str] = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , ) _a : List[str] = PyTorchBenchmark(_a ) benchmark.run() self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() ) def __lowercase ( self ) -> Optional[Any]: _a : Optional[int] = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_a ): self.assertTrue(hasattr(_a , '''sequential''' ) ) self.assertTrue(hasattr(_a , '''cumulative''' ) ) self.assertTrue(hasattr(_a , '''current''' ) ) self.assertTrue(hasattr(_a , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: _a : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , ) _a : List[str] = PyTorchBenchmark(_a ) _a : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
14
"""Convert a PyTorch ``BertModel`` checkpoint to the original TF 1.x format."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Save ``model``'s weights as a TF 1.x checkpoint under ``ckpt_dir``.

    Variable names are rewritten to the original Google BERT scheme, and
    dense/attention kernels are transposed because TF stores them as
    (in_features, out_features) — the opposite of PyTorch's layout.
    """
    tensors_to_transpose = (
        "dense.weight",
        "attention.self.query",
        "attention.self.key",
        "attention.self.value",
    )

    # (PyTorch substring, TF replacement) pairs, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every substitution, then prefix with the "bert/" scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the tensor's
        # dtype/shape; the value is written afterwards via set_value.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(
            dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer()
        )
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Sanity check: the round-tripped value must match the source.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: parse args, load the PyTorch model, convert it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument(
        "--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin"
    )
    parser.add_argument(
        "--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model"
    )
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
653
0
import requests A : str = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=' def UpperCamelCase ( __magic_name__ : str ) -> None: """simple docstring""" lowercase__ = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["""articles"""] , 1 ): print(f'''{i}.) {article["title"]}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
15
'''simple docstring''' from collections import deque from .hash_table import HashTable class A_ (a_ ): """simple docstring""" def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]: '''simple docstring''' super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(lowerCAmelCase__ ) snake_case_ : Tuple = self.values[key] def _A ( self :int ) -> Dict: '''simple docstring''' return ( sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0 ): return key return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
653
0
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() __A : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __A : List[str] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) 
rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'), ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'), ('transformer.decoder.norm.weight', 
'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) def __a ( A__ : str , A__ : Any , A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: SCREAMING_SNAKE_CASE = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value return new_state_dict def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention 
SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias[:256] SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias[256:512] SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention SCREAMING_SNAKE_CASE = state_dict.pop( F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" ) # next, add query, keys and values (in that order) of cross-attention to the state dict SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:256, :] SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:256] SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[256:512, :] SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[256:512] SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-256:, :] SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-256:] def __a ( A__ : Tuple , A__ : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size SCREAMING_SNAKE_CASE = max(A__ , A__ ) SCREAMING_SNAKE_CASE = 800 if "detection" in checkpoint_url else 1000 SCREAMING_SNAKE_CASE = target_max_size / current_max_size SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def __a ( A__ : List[Any] ): SCREAMING_SNAKE_CASE = F.to_tensor(A__ ) SCREAMING_SNAKE_CASE = F.normalize(A__ , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ) return image @torch.no_grad() def __a ( A__ : Union[str, Any] , A__ : int , A__ : str ): logger.info("Converting model..." 
) # load original state dict SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(A__ , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = rename_backbone_keys(A__ ) # query, key and value matrices need special treatment read_in_q_k_v(A__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them SCREAMING_SNAKE_CASE = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): SCREAMING_SNAKE_CASE = state_dict.pop(A__ ) SCREAMING_SNAKE_CASE = val # create HuggingFace model and load state dict SCREAMING_SNAKE_CASE = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: SCREAMING_SNAKE_CASE = 15 SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = {0: "table", 1: "table rotated"} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} else: SCREAMING_SNAKE_CASE = 125 SCREAMING_SNAKE_CASE = 6 SCREAMING_SNAKE_CASE = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 ) SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(A__ ) model.load_state_dict(A__ ) model.eval() # verify our conversion SCREAMING_SNAKE_CASE = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=A__ ) 
SCREAMING_SNAKE_CASE = Image.open(A__ ).convert("RGB" ) SCREAMING_SNAKE_CASE = normalize(resize(A__ , A__ ) ).unsqueeze(0 ) SCREAMING_SNAKE_CASE = model(A__ ) if "detection" in checkpoint_url: SCREAMING_SNAKE_CASE = (1, 15, 3) SCREAMING_SNAKE_CASE = torch.tensor( [[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] ) SCREAMING_SNAKE_CASE = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] ) else: SCREAMING_SNAKE_CASE = (1, 125, 7) SCREAMING_SNAKE_CASE = torch.tensor( [[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] ) SCREAMING_SNAKE_CASE = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) image_processor.save_pretrained(A__ ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) SCREAMING_SNAKE_CASE = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(A__ ) image_processor.push_to_hub(A__ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', type=str, choices=[ 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth', ], help='URL of the Table Transformer checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A : Optional[Any] = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
16
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar('''KEY''') __lowerCamelCase : int = TypeVar('''VAL''') @dataclass(frozen=a_ , slots=a_ ) class A_ (Generic[KEY, VAL] ): """simple docstring""" a__ = 42 a__ = 42 class A_ (_Item ): """simple docstring""" def __init__( self :List[Any] ) -> None: '''simple docstring''' super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __bool__( self :Optional[int] ) -> bool: '''simple docstring''' return False __lowerCamelCase : Dict = _DeletedItem() class A_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None: '''simple docstring''' snake_case_ : Any = initial_block_size snake_case_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case_ : Tuple = capacity_factor snake_case_ : List[Any] = 0 def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int: '''simple docstring''' return hash(lowerCAmelCase__ ) % len(self._buckets ) def _A ( self :Any , lowerCAmelCase__ :int ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool: '''simple docstring''' snake_case_ : Optional[int] = self._buckets[ind] if not stored: snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) self._len += 1 return True elif stored.key == key: snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) return True else: return False def _A ( self :int ) -> bool: '''simple docstring''' snake_case_ : Any = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCAmelCase__ ) def _A ( self :Any ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False snake_case_ : Optional[int] = len(self._buckets ) * 
self._capacity_factor / 2 return len(self ) < limit def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None: '''simple docstring''' snake_case_ : Tuple = self._buckets snake_case_ : int = [None] * new_size snake_case_ : Any = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _A ( self :Optional[int] ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def _A ( self :str ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]: '''simple docstring''' snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ ) for _ in range(len(self._buckets ) ): yield ind snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): break def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCAmelCase__ , lowerCAmelCase__ ) def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : int = self._buckets[ind] if item is None: raise KeyError(lowerCAmelCase__ ) if item is _deleted: continue if item.key == key: snake_case_ : List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : Optional[Any] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCAmelCase__ ) def __len__( self :Optional[Any] ) -> int: '''simple docstring''' return self._len def 
__iter__( self :List[Any] ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self :Any ) -> str: '''simple docstring''' snake_case_ : Dict = " ,".join( F'''{item.key}: {item.val}''' for item in self._buckets if item ) return F'''HashMap({val_string})'''
653
0
from __future__ import annotations import pandas as pd def __SCREAMING_SNAKE_CASE ( a__ : list[int] ,a__ : list[int] ,a__ : int ) -> list[int]: __A : List[str] = [0] * no_of_processes __A : Dict = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(a__ ): __A : Tuple = burst_time[i] __A : Dict = 0 __A : Any = 0 __A : List[Any] = 999999999 __A : List[str] = 0 __A : List[str] = False # Process until all processes are completed while complete != no_of_processes: for j in range(a__ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: __A : List[Any] = remaining_time[j] __A : Union[str, Any] = j __A : int = True if not check: increment_time += 1 continue remaining_time[short] -= 1 __A : int = remaining_time[short] if minm == 0: __A : List[str] = 999999999 if remaining_time[short] == 0: complete += 1 __A : Optional[Any] = False # Find finish time of current process __A : Union[str, Any] = increment_time + 1 # Calculate waiting time __A : Tuple = finish_time - arrival_time[short] __A : Tuple = finar - burst_time[short] if waiting_time[short] < 0: __A : str = 0 # Increment time increment_time += 1 return waiting_time def __SCREAMING_SNAKE_CASE ( a__ : list[int] ,a__ : int ,a__ : list[int] ) -> list[int]: __A : Any = [0] * no_of_processes for i in range(a__ ): __A : Optional[Any] = burst_time[i] + waiting_time[i] return turn_around_time def __SCREAMING_SNAKE_CASE ( a__ : list[int] ,a__ : list[int] ,a__ : int ) -> None: __A : int = 0 __A : Any = 0 for i in range(a__ ): __A : Dict = total_waiting_time + waiting_time[i] __A : List[str] = total_turn_around_time + turn_around_time[i] print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print("""Average turn around time =""" ,total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('''Enter how many process you want to analyze''') UpperCAmelCase_ : Tuple = int(input()) UpperCAmelCase_ : Dict = [0] * 
no_of_processes UpperCAmelCase_ : Union[str, Any] = [0] * no_of_processes UpperCAmelCase_ : Optional[int] = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('''Enter the arrival time and burst time for process:--''' + str(i + 1)) UpperCAmelCase_ , UpperCAmelCase_ : Dict = map(int, input().split()) UpperCAmelCase_ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes) UpperCAmelCase_ : Union[str, Any] = burst_time UpperCAmelCase_ : Any = no_of_processes UpperCAmelCase_ : int = waiting_time UpperCAmelCase_ : Optional[Any] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) UpperCAmelCase_ : Tuple = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ '''Process''', '''BurstTime''', '''ArrivalTime''', '''WaitingTime''', '''TurnAroundTime''', ], ) # Printing the dataFrame pd.set_option('''display.max_rows''', fcfs.shape[0] + 1) print(fcfs)
17
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : Tuple = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class A_ (a_ ): """simple docstring""" a__ = '''gpt_bigcode''' a__ = ['''past_key_values'''] a__ = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :List[Any] , lowerCAmelCase__ :Any=50_257 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :int=12 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[str]="gelu_pytorch_tanh" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=1E-5 , lowerCAmelCase__ :Union[str, Any]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=50_256 , lowerCAmelCase__ :List[str]=50_256 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=True , **lowerCAmelCase__ :Union[str, Any] , ) -> Any: '''simple docstring''' snake_case_ : List[Any] = vocab_size snake_case_ : Any = n_positions snake_case_ : Any = n_embd snake_case_ : Optional[Any] = n_layer snake_case_ : List[Any] = n_head snake_case_ : Tuple = n_inner snake_case_ : str = activation_function snake_case_ : Union[str, Any] = resid_pdrop snake_case_ : Optional[Any] = embd_pdrop snake_case_ : Any = attn_pdrop snake_case_ : List[Any] = layer_norm_epsilon snake_case_ : Tuple = initializer_range snake_case_ : int = scale_attn_weights snake_case_ : Union[str, Any] = use_cache snake_case_ : Dict = attention_softmax_in_fpaa snake_case_ : Any = scale_attention_softmax_in_fpaa snake_case_ : List[str] = multi_query snake_case_ : 
List[str] = bos_token_id snake_case_ : Any = eos_token_id super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
653
0
'''simple docstring''' from __future__ import annotations def __a(SCREAMING_SNAKE_CASE_ : list[int | float] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE_ ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(SCREAMING_SNAKE_CASE_ ) or left < -len(SCREAMING_SNAKE_CASE_ ) or right >= len(SCREAMING_SNAKE_CASE_ ) or right < -len(SCREAMING_SNAKE_CASE_ ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] _lowerCAmelCase = (left + right) >> 1 # the middle _lowerCAmelCase = find_max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # find max in range[left, mid] _lowerCAmelCase = find_max(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
18
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) __lowerCamelCase : Union[str, Any] = logging.getLogger(__name__) def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : Dict = git.Repo(search_parent_directories=__magic_name__ ) snake_case_ : Optional[int] = { "repo_id": str(__magic_name__ ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f: json.dump(__magic_name__ ,__magic_name__ ,indent=4 ) def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" if params.n_gpu <= 0: snake_case_ : Any = 0 snake_case_ : Any = -1 snake_case_ : Tuple = True snake_case_ : List[str] = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] ) snake_case_ : int = int(os.environ["N_GPU_NODE"] ) snake_case_ : Any = int(os.environ["RANK"] ) # number of nodes / node ID snake_case_ : Dict = params.world_size // params.n_gpu_per_node snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node snake_case_ : Tuple = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 snake_case_ : Optional[int] = 1 snake_case_ : str = 0 snake_case_ : List[Any] = 0 snake_case_ : int = 0 snake_case_ : Dict = 1 snake_case_ : Optional[Any] = 1 snake_case_ : str = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * 
params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode snake_case_ : str = params.node_id == 0 and params.local_rank == 0 snake_case_ : str = params.n_nodes > 1 # summary snake_case_ : str = F'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" ,backend="nccl" ,) def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
653
0
"""simple docstring""" _a = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _a = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _a = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
19
"""Tests for the YOLOS image processor: resizing, rescaling, padding and COCO annotation handling."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    """Holds the kwargs for a YolosImageProcessor under test and computes expected output sizes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default: resize the shortest edge to 18, capped at a longest edge of 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output for the given images.

        For a single image this applies the shortest-edge resize rule; for a batch it
        returns the max height and max width over the per-image expected sizes, since
        padding aligns every image in a batch to the largest one.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # Tensors/arrays are channels-first here: shape is (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end tests for YolosImageProcessor on PIL, NumPy and PyTorch inputs."""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Overriding size/max_size via kwargs must win over the dict values.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_pad_and_create_pixel_mask(self):
        # Initialize image_processings: one full pipeline, one that only pads.
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
653
0
"""Convert an original LUKE checkpoint to the Hugging Face LukeModel format."""
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert the original LUKE weights at ``checkpoint_path`` and save model + tokenizer.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model config.
        entity_vocab_path: path to the TSV entity vocabulary.
        pytorch_dump_folder_path: output directory for the converted artifacts.
        model_size: ``"base"`` or ``"large"`` — selects the expected verification values.

    Raises:
        ValueError: if unexpected keys remain after loading, or the converted model's
            outputs do not match the reference values.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from the '@' and '#' word embeddings.
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism by
    # copying the standard query weights into the three entity-aware variants.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    # NOTE(review): the obfuscated source lost the assignment target here; this follows
    # the upstream converter, which aliases [MASK2] to the [MASK] entity embedding.
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: the original used `!=` inside `not (...)`, which raised precisely when
    # the shape WAS correct; the check must mirror the word-hidden-state check above.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocabulary (``title<TAB>count`` per line) into {title: index}."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
20
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" if not isinstance(__magic_name__ ,__magic_name__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) snake_case_ : Dict = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__magic_name__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
653
0
"""Benchmark `datasets.Dataset.map`/`.filter` variants and dump timings to a JSON file."""
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def time_map(dataset, **kwargs):
    """Run `dataset.map(**kwargs)`; `get_duration` turns the call into a timing."""
    _ = dataset.map(**kwargs)


@get_duration
def time_filter(dataset, **kwargs):
    """Run `dataset.filter(**kwargs)`; `get_duration` turns the call into a timing."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Generate a synthetic dataset and time map/filter under several output formats."""
    # NOTE(review): the result-dict labels below were lost in the source (assignment
    # targets were mangled); they are reconstructed and should be confirmed against
    # the reference benchmark before comparing historical results.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = time_map(dataset)

        times["map identity batched"] = time_map(dataset, batched=True)

        times["map no-op batched"] = time_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = time_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = time_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = time_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = time_map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = time_map(dataset, function=tokenize, batched=True)

        times["filter"] = time_filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
21
"""Accelerate example: train BERT on GLUE MRPC while `find_executable_batch_size`
retries with a smaller batch size whenever an out-of-memory error occurs."""
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/validation dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` — used for process-first dataset mapping and
            to decide padding strategy (TPU / mixed precision).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate the model, retrying with smaller batch sizes on OOM.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI args providing `cpu` and `mixed_precision`.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI options and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
653
0
"""Convert an original Donut checkpoint into a VisionEncoderDecoderModel
(DonutSwin encoder + MBart decoder) and verify outputs against the original."""
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    """Derive (encoder_config, decoder_config) from the original DonutModel's config."""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(
            model.decoder.tokenizer
        ),  # several special tokens are added to the vocab of XLMRobertaTokenizer, also use the decoder's vocab size
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    """Map an original Donut state-dict key to the HuggingFace naming scheme."""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename keys and split fused qkv matrices into separate query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Fused qkv is stacked as [q; k; v] along dim 0 — slice it apart.
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert `model_name`, verify its outputs, then optionally save and/or push it.

    Raises:
        ValueError: if `model_name` is not one of the known Donut checkpoints.
        AssertionError: if converted-model outputs diverge from the original's.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    original_tokens = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, original_tokens, None).logits
    logits = model(pixel_values, decoder_input_ids=original_tokens).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
22
"""Zero-shot text classification tool built on a BART-MNLI entailment model."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    """Classify a text against a caller-provided list of candidate labels.

    Each label is scored by framing "This example is {label}" as an NLI hypothesis
    and picking the label with the highest entailment logit.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Locate the 'entailment' class index in the model config after base setup.

        Raises:
            ValueError: if no label in the config starts with "entail".
        """
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs — one pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose pair scored the highest entailment logit.

        NOTE(review): indexes logits column 2 directly rather than
        ``self.entailment_id`` computed in ``setup`` — kept as-is to preserve
        behavior, but worth confirming the two always agree for the checkpoint used.
        """
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
653
0
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: snake_case__ : str = None snake_case__ : Optional[int] = logging.get_logger(__name__) snake_case__ : Dict = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} snake_case__ : Any = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", }, """tokenizer_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""", }, } # TODO(PVP) - this should be removed in Transformers v5 snake_case__ : List[Any] = { """t5-small""": 5_1_2, """t5-base""": 5_1_2, """t5-large""": 5_1_2, """t5-3b""": 5_1_2, """t5-11b""": 5_1_2, } class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["""input_ids""", """attention_mask"""] A_ = TaTokenizer A_ = [] def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=100 , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> 
Dict: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: UpperCamelCase_ = [f"""<extra_id_{i}>""" for i in range(_UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens UpperCamelCase_ = len(set(filter(lambda _UpperCAmelCase : bool('extra_id_' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) UpperCamelCase_ = vocab_file UpperCamelCase_ = False if not self.vocab_file else True UpperCamelCase_ = extra_ids @staticmethod def _UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: UpperCamelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f""" {pretrained_model_name_or_path} automatically truncating your input to""" f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" f""" longer 
than {deprecated_max_model_length} you can either instantiate this tokenizer with""" ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _UpperCAmelCase , ) return max_model_length def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase_ = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) logger.info(f"""Copy vocab file to {out_vocab_file}""" ) return (out_vocab_file,) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]: UpperCamelCase_ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: UpperCamelCase_ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]: UpperCamelCase_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _UpperCAmelCase ( self ) -> List[str]: return list( set(filter(lambda _UpperCAmelCase : bool(re.search(R'<extra_id_\d+>' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _UpperCAmelCase ( self ) -> Optional[int]: return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()]
23
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __lowerCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''ViTFeatureExtractor'''] __lowerCamelCase : Any = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): 
raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
0
'''simple docstring''' import math from numpy import inf from scipy.integrate import quad def _UpperCamelCase (_lowerCamelCase : float )-> float: '''simple docstring''' if num <= 0: raise ValueError('''math domain error''' ) return quad(_lowerCamelCase , 0 , _lowerCamelCase , args=(_lowerCamelCase) )[0] def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float )-> float: '''simple docstring''' return math.pow(_lowerCamelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
24
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ 
:Any=1_000 , ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Optional[int] = num_channels snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = patch_size snake_case_ : Union[str, Any] = text_seq_length snake_case_ : Dict = is_training snake_case_ : Optional[Any] = use_input_mask snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Dict = use_labels snake_case_ : List[str] = vocab_size snake_case_ : Optional[Any] = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : str = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : List[Any] = type_vocab_size snake_case_ : Union[str, Any] = type_sequence_label_size snake_case_ : List[Any] = initializer_range snake_case_ : Union[str, Any] = coordinate_size snake_case_ : int = shape_size snake_case_ : Tuple = num_labels snake_case_ : List[Any] = num_choices snake_case_ : List[str] = scope snake_case_ : Dict = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) snake_case_ : str = text_seq_length snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1 snake_case_ : str = self.text_seq_length + self.image_seq_length def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ : Optional[Any] = bbox[i, j, 3] snake_case_ : Any = bbox[i, j, 1] snake_case_ : Tuple = t if bbox[i, j, 2] < 
bbox[i, j, 0]: snake_case_ : str = bbox[i, j, 2] snake_case_ : Dict = bbox[i, j, 0] snake_case_ : Union[str, Any] = t snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Dict = None if self.use_input_mask: snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] ) snake_case_ : Any = None if self.use_token_type_ids: snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) snake_case_ : Union[str, Any] = None snake_case_ : str = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) snake_case_ : str = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # text + image snake_case_ : Tuple = model(lowerCAmelCase__ , 
pixel_values=lowerCAmelCase__ ) snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only snake_case_ : List[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]: '''simple docstring''' snake_case_ : str = self.num_labels snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[int] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str: '''simple docstring''' 
snake_case_ : Optional[int] = self.num_labels snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : Optional[Any] = config_and_inputs snake_case_ : Tuple = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = False a__ = False a__ = False a__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, 
LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) a__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]: '''simple docstring''' return True def _A ( self :List[Any] ) -> str: '''simple docstring''' snake_case_ : Tuple = LayoutLMvaModelTester(self ) snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any: '''simple docstring''' snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ ) if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Optional[Any] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase__ ): snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in get_values(lowerCAmelCase__ ): snake_case_ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) snake_case_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) elif model_class in [ *get_values(lowerCAmelCase__ ), ]: snake_case_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , ) return 
inputs_dict def _A ( self :Any ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :int ) -> int: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ : int = type self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :int ) -> str: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ ) def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ ) @slow def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class A_ (unittest.TestCase ): """simple docstring""" @cached_property def _A ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None @slow def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = 
LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = self.default_image_processor snake_case_ : Optional[int] = prepare_img() snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ ) snake_case_ : List[str] = torch.tensor([[1, 2]] ) snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass snake_case_ : Any = model( input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , ) # verify the logits snake_case_ : Optional[Any] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
653
0
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _UpperCamelCase : '''simple docstring''' lowerCamelCase__ =42 # setable values lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =None @classmethod def __UpperCamelCase ( cls : Tuple , a : CommonSchedulerState , a : jnp.ndarray , a : jnp.ndarray ) -> Dict: """simple docstring""" return cls(common=a , init_noise_sigma=a , timesteps=a ) @dataclass class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =42 class _UpperCamelCase ( __A , __A ): '''simple docstring''' lowerCamelCase__ =[e.name for e in FlaxKarrasDiffusionSchedulers] lowerCamelCase__ =42 @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" return True @register_to_config def __init__( self : str , a : int = 1000 , a : float = 0.0001 , a : float = 0.02 , a : str = "linear" , a : Optional[jnp.ndarray] = None , a : str = "fixed_small" , a : bool = True , a : str = "epsilon" , a : jnp.dtype = jnp.floataa , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = dtype def __UpperCamelCase ( self : Any , a : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState: """simple docstring""" if common is None: SCREAMING_SNAKE_CASE : Optional[int] = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution SCREAMING_SNAKE_CASE : Tuple = jnp.array(1.0 , dtype=self.dtype ) SCREAMING_SNAKE_CASE : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=a , 
init_noise_sigma=a , timesteps=a , ) def __UpperCamelCase ( self : List[Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : Optional[int] = None ) -> jnp.ndarray: """simple docstring""" return sample def __UpperCamelCase ( self : Optional[int] , a : DDPMSchedulerState , a : int , a : Tuple = () ) -> DDPMSchedulerState: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE : Dict = (jnp.arange(0 , a ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=a , timesteps=a , ) def __UpperCamelCase ( self : Optional[Any] , a : DDPMSchedulerState , a : List[str] , a : Any=None , a : Dict=None ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: SCREAMING_SNAKE_CASE : int = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": SCREAMING_SNAKE_CASE : str = jnp.clip(a , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": SCREAMING_SNAKE_CASE : Any = jnp.log(jnp.clip(a , a_min=1e-20 ) ) elif variance_type == "fixed_large": SCREAMING_SNAKE_CASE : int = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log SCREAMING_SNAKE_CASE : str = jnp.log(state.common.betas[t] ) elif variance_type == 
"learned": return predicted_variance elif variance_type == "learned_range": SCREAMING_SNAKE_CASE : Any = variance SCREAMING_SNAKE_CASE : int = state.common.betas[t] SCREAMING_SNAKE_CASE : Dict = (predicted_variance + 1) / 2 SCREAMING_SNAKE_CASE : List[Any] = frac * max_log + (1 - frac) * min_log return variance def __UpperCamelCase ( self : Any , a : DDPMSchedulerState , a : jnp.ndarray , a : int , a : jnp.ndarray , a : Optional[jax.random.KeyArray] = None , a : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = timestep if key is None: SCREAMING_SNAKE_CASE : str = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.split(a , sample.shape[1] , axis=1 ) else: SCREAMING_SNAKE_CASE : int = None # 1. compute alphas, betas SCREAMING_SNAKE_CASE : Tuple = state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE : Union[str, Any] = model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " " for the FlaxDDPMScheduler." ) # 3. 
Clip "predicted x_0" if self.config.clip_sample: SCREAMING_SNAKE_CASE : Tuple = jnp.clip(a , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t SCREAMING_SNAKE_CASE : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise def random_variance(): SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(a , num=1 ) SCREAMING_SNAKE_CASE : Dict = jax.random.normal(a , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(a , a , predicted_variance=a ) ** 0.5) * noise SCREAMING_SNAKE_CASE : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=a , state=a ) def __UpperCamelCase ( self : Union[str, Any] , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray: """simple docstring""" return add_noise_common(state.common , a , a , a ) def __UpperCamelCase ( self : str , a : DDPMSchedulerState , a : jnp.ndarray , a : jnp.ndarray , a : jnp.ndarray , ) -> jnp.ndarray: """simple docstring""" return get_velocity_common(state.common , a , a , a ) def __len__( self : int ) -> Any: """simple docstring""" return self.config.num_train_timesteps
25
# NOTE(review): source lines are collapsed/minified; tokens preserved verbatim below.
# Tests for `datasets.parallel.parallel_backend`: a picklable worker (i + 1), a check
# that the "spark" joblib backend is selected inside the context manager and that an
# unsupported backend name raises, and a parametrized (num_proc in {2, -1}) check that
# map_nested under the spark backend maps the worker over lists/dicts/nested dicts.
# Requires dill > 0.3.2, joblibspark, and a non-Windows host (see decorators).
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing """simple docstring""" return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __UpperCAmelCase ( )-> List[str]: """simple docstring""" with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" snake_case_ : str = [1, 2, 3] with pytest.raises(__magic_name__ ): with parallel_backend("unsupported backend" ): map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 ) with pytest.raises(__magic_name__ ): with parallel_backend("unsupported backend" ): map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" ,[2, -1] ) def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Optional[Any] = [1, 2] snake_case_ : Union[str, Any] = {"a": 1, "b": 2} snake_case_ : str = {"a": [1, 2], "b": [3, 4]} snake_case_ : List[str] = {"a": {"1": 1}, "b": 2} snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4} snake_case_ : Tuple = [2, 3] snake_case_ : str = {"a": 2, "b": 3} snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]} snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3} snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa assert map_nested(__magic_name__
,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
653
0
"""Check whether a number is a Krishnamurthy (Strong) number.

A Krishnamurthy number equals the sum of the factorials of its digits
(e.g. 145 = 1! + 4! + 5!).

Bug fix: the obfuscated original defined both helpers as ``_a`` (the second
shadowing the first) while their bodies referenced the undefined names
``digit``, ``duplicate``, ``factorial`` and ``fact_sum``, and ``__main__``
called a nonexistent ``krishnamurthy`` — every code path raised NameError.
Restored the names the script actually uses.
"""


def factorial(digit: int) -> int:
    """Return digit! recursively; 0! and 1! are both 1."""
    return 1 if digit in (0, 1) else digit * factorial(digit - 1)


def krishnamurthy(number: int) -> bool:
    """Return True iff ``number`` equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        # Peel off the least-significant digit each iteration.
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.""")
26
# NOTE(review): source lines are collapsed/minified; tokens preserved verbatim below.
# ESM model configuration: `EsmConfig` (standard BERT-like hyperparameters plus
# optional ESMFold folding-model fields), dataclass sub-configs `EsmFoldConfig`,
# `TrunkConfig` and `StructureModuleConfig` (each with to_dict()/nested-dict
# normalization), and a helper returning the default ESM-2 vocabulary tuple.
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Dict = logging.get_logger(__name__) # TODO Update this __lowerCamelCase : int = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class A_ (a_ ): """simple docstring""" a__ = '''esm''' def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : str = vocab_size snake_case_ : str = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Tuple = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : str = initializer_range snake_case_ : List[Any] = layer_norm_eps snake_case_ : str = position_embedding_type snake_case_ : Optional[int] = use_cache snake_case_ : str = emb_layer_norm_before snake_case_ : List[Any] = token_dropout snake_case_ : str = is_folding_model if is_folding_model: if esmfold_config is
None: logger.info("No esmfold_config supplied for folding model, using default values." ) snake_case_ : Optional[Any] = EsmFoldConfig() elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ ) snake_case_ : Optional[Any] = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) snake_case_ : List[str] = get_default_vocab_list() else: snake_case_ : List[str] = vocab_list else: snake_case_ : List[Any] = None snake_case_ : int = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" ) def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' snake_case_ : Any = super().to_dict() if isinstance(self.esmfold_config , lowerCAmelCase__ ): snake_case_ : Optional[int] = self.esmfold_config.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = None a__ = True a__ = False a__ = False a__ = False a__ = 0 a__ = True a__ = False a__ = 128 a__ = None def _A ( self :Dict ) -> int: '''simple docstring''' if self.trunk is None: snake_case_ : Dict = TrunkConfig() elif isinstance(self.trunk , lowerCAmelCase__ ): snake_case_ : int = TrunkConfig(**self.trunk ) def _A ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = asdict(self ) snake_case_ : Optional[int] = self.trunk.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = 48 a__ = 1024 a__ = 128 a__ = 32 a__ = 32 a__ = 32 a__ = 0 a__ = 0 a__ = False a__ = 4 a__ = 128 a__ = None def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.structure_module is None: snake_case_ : Optional[int] = StructureModuleConfig() elif isinstance(self.structure_module , lowerCAmelCase__ ): snake_case_ : List[str] =
# NOTE(review): the `% self.sequence_state_dim` and `% self.pairwise_state_dim`
# checks on the next line compare a value against itself (always 0, never raises);
# presumably the divisor was meant to be the corresponding *_head_width —
# confirm against upstream transformers before changing.
StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def _A ( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : int = asdict(self ) snake_case_ : Dict = self.structure_module.to_dict() return output @dataclass class A_ : """simple docstring""" a__ = 384 a__ = 128 a__ = 16 a__ = 128 a__ = 12 a__ = 4 a__ = 8 a__ = 0.1 a__ = 8 a__ = 1 a__ = 2 a__ = 7 a__ = 10 a__ = 1E-8 a__ = 1E5 def _A ( self :Dict ) -> Dict: '''simple docstring''' return asdict(self ) def __UpperCAmelCase ( )->
int: """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
653
0
# NOTE(review): source line is collapsed/minified; tokens preserved verbatim below.
# Standard transformers lazy-import `__init__` for Conditional DETR: declares the
# import structure (config always; feature extractor / image processor only if
# vision is available; modeling classes only if torch is available), performs the
# real imports under TYPE_CHECKING, and otherwise installs a _LazyModule proxy.
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A : List[str] = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ["ConditionalDetrFeatureExtractor"] __A : Optional[Any] = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
# NOTE(review): source lines are collapsed/minified; tokens preserved verbatim below.
# Standard transformers lazy-import `__init__` for Longformer: config + slow
# tokenizer always; fast tokenizer only if tokenizers is available; PyTorch and
# TensorFlow modeling classes gated on their respective availability checks.
# Real imports happen under TYPE_CHECKING; at runtime a _LazyModule proxy is used.
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
653
0
# NOTE(review): source lines are collapsed/minified; tokens preserved verbatim below.
# Biquad IIR filter constructors (lowpass, highpass, bandpass, allpass, peak,
# lowshelf, highshelf) following the Audio EQ Cookbook coefficient formulas:
# each computes omega = tau*f/fs, sin/cos, alpha = sin/(2*Q) (and A = 10^(gain/40)
# for the shelving/peak variants), then loads a 2nd-order IIRFilter.
# NOTE(review): the name-mangling collapsed the distinct b0/b1/b2/a0/a1/a2
# coefficient variables into repeated `aa`/`ba` names, so several assignments
# visibly overwrite each other (e.g. the allpass builder passes the same triple
# twice) — the original per-coefficient values cannot be recovered from this
# view; verify against the upstream audio_filters module before relying on it.
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float = 1 / sqrt(2 ) ): """simple docstring""" SCREAMING_SNAKE_CASE : int = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Tuple = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : int = (1 - _cos) / 2 SCREAMING_SNAKE_CASE : List[str] = 1 - _cos SCREAMING_SNAKE_CASE : List[Any] = 1 + alpha SCREAMING_SNAKE_CASE : Union[str, Any] = -2 * _cos SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha SCREAMING_SNAKE_CASE : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float = 1 / sqrt(2 ) ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Any = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : str = (1 + _cos) / 2 SCREAMING_SNAKE_CASE : int = -1 - _cos SCREAMING_SNAKE_CASE : Tuple = 1 + alpha SCREAMING_SNAKE_CASE : Union[str, Any] = -2 * _cos SCREAMING_SNAKE_CASE : Tuple = 1 - alpha SCREAMING_SNAKE_CASE : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float = 1 / sqrt(2 ) ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Tuple = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : List[str] = _sin / 2 SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : int = -ba SCREAMING_SNAKE_CASE : List[str] = 1 + alpha SCREAMING_SNAKE_CASE :
Optional[int] = -2 * _cos SCREAMING_SNAKE_CASE : Dict = 1 - alpha SCREAMING_SNAKE_CASE : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float = 1 / sqrt(2 ) ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Optional[Any] = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Dict = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : Dict = 1 - alpha SCREAMING_SNAKE_CASE : int = -2 * _cos SCREAMING_SNAKE_CASE : Tuple = 1 + alpha SCREAMING_SNAKE_CASE : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: float = 1 / sqrt(2 ) ,): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Tuple = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : List[str] = 10 ** (gain_db / 40) SCREAMING_SNAKE_CASE : Union[str, Any] = 1 + alpha * big_a SCREAMING_SNAKE_CASE : str = -2 * _cos SCREAMING_SNAKE_CASE : List[Any] = 1 - alpha * big_a SCREAMING_SNAKE_CASE : List[str] = 1 + alpha / big_a SCREAMING_SNAKE_CASE : Optional[int] = -2 * _cos SCREAMING_SNAKE_CASE : Any = 1 - alpha / big_a SCREAMING_SNAKE_CASE : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: float = 1 / sqrt(2 ) ,): """simple docstring""" SCREAMING_SNAKE_CASE : int = tau * frequency / samplerate SCREAMING_SNAKE_CASE : List[Any] = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE :
Optional[int] = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : Tuple = 10 ** (gain_db / 40) SCREAMING_SNAKE_CASE : str = (big_a + 1) - (big_a - 1) * _cos SCREAMING_SNAKE_CASE : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos SCREAMING_SNAKE_CASE : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos SCREAMING_SNAKE_CASE : str = (big_a - 1) + (big_a + 1) * _cos SCREAMING_SNAKE_CASE : Optional[Any] = 2 * sqrt(__UpperCamelCase ) * alpha SCREAMING_SNAKE_CASE : Optional[Any] = big_a * (pmc + aaa) SCREAMING_SNAKE_CASE : str = 2 * big_a * mpc SCREAMING_SNAKE_CASE : Dict = big_a * (pmc - aaa) SCREAMING_SNAKE_CASE : str = ppmc + aaa SCREAMING_SNAKE_CASE : Union[str, Any] = -2 * pmpc SCREAMING_SNAKE_CASE : List[Any] = ppmc - aaa SCREAMING_SNAKE_CASE : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: float ,__UpperCamelCase: float = 1 / sqrt(2 ) ,): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = tau * frequency / samplerate SCREAMING_SNAKE_CASE : Any = sin(__UpperCamelCase ) SCREAMING_SNAKE_CASE : List[str] = cos(__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = _sin / (2 * q_factor) SCREAMING_SNAKE_CASE : List[Any] = 10 ** (gain_db / 40) SCREAMING_SNAKE_CASE : Dict = (big_a + 1) - (big_a - 1) * _cos SCREAMING_SNAKE_CASE : str = (big_a + 1) + (big_a - 1) * _cos SCREAMING_SNAKE_CASE : Optional[int] = (big_a - 1) - (big_a + 1) * _cos SCREAMING_SNAKE_CASE : Optional[int] = (big_a - 1) + (big_a + 1) * _cos SCREAMING_SNAKE_CASE : Dict = 2 * sqrt(__UpperCamelCase ) * alpha SCREAMING_SNAKE_CASE : List[str] = big_a * (ppmc + aaa) SCREAMING_SNAKE_CASE : Tuple = -2 * big_a * pmpc SCREAMING_SNAKE_CASE : Optional[Any] = big_a * (ppmc - aaa) SCREAMING_SNAKE_CASE : Optional[Any] = pmc + aaa SCREAMING_SNAKE_CASE : Dict = 2 * mpc SCREAMING_SNAKE_CASE : int = pmc - aaa SCREAMING_SNAKE_CASE : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba,
ba, ba] ) return filt
28
# NOTE(review): source lines are collapsed/minified; tokens preserved verbatim below.
# Test suite for the Autoformer time-series model: a model tester that builds
# configs/inputs (past/future values, time features, observed masks, static
# categorical features), a standalone encoder/decoder round-trip check through
# save_pretrained/from_pretrained, the common ModelTester/PipelineTester mixin
# suite (signatures, attention-output shapes incl. decoder and cross attentions),
# and slow integration tests against the huggingface/autoformer-tourism-monthly
# checkpoint (hidden-state slices and generation means vs. hard-coded tensors).
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCamelCase : Optional[int] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A_ : """simple docstring""" def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict: '''simple docstring''' snake_case_ : List[str] = d_model snake_case_ : Dict = parent snake_case_ : Optional[Any] = batch_size snake_case_ : Optional[Any] = prediction_length snake_case_ : str = context_length snake_case_ : Tuple = cardinality snake_case_ : List[str] = num_time_features snake_case_ : Optional[Any] = lags_sequence snake_case_ : Union[str, Any] = embedding_dimension snake_case_ : Optional[Any] = is_training snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : int =
intermediate_size snake_case_ : Any = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = context_length snake_case_ : Any = prediction_length + label_length snake_case_ : Union[str, Any] = label_length snake_case_ : List[Any] = moving_average snake_case_ : str = autocorrelation_factor def _A ( self :List[Any] ) -> Any: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = config.context_length + max(config.lags_sequence ) snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] ) snake_case_ : int = { "past_values": past_values, "static_categorical_features":
static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _A ( self :Dict ) -> Tuple: '''simple docstring''' snake_case_ : str = self.get_config() snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ ) return config, inputs_dict def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() snake_case_ : Optional[int] = model(**lowerCAmelCase__ ) snake_case_ : Any = outputs.encoder_last_hidden_state snake_case_ : Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ ) snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) snake_case_ : List[Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) snake_case_ : Any = ( torch.mean(transformer_inputs[:, : config.context_length, ...]
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) snake_case_ : List[str] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) snake_case_ : Optional[Any] = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) snake_case_ : Any = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = model.get_decoder() decoder.save_pretrained(lowerCAmelCase__ ) snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Tuple = decoder( trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a__ = (AutoformerForPrediction,) if is_torch_available() else () a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Dict ) -> int: '''simple docstring''' snake_case_ : Tuple = AutoformerModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def _A ( self :List[str] ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ : List[Any] =
model_class(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _A ( self :str ) -> str: '''simple docstring''' pass def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) ) # The main input is the name of the argument after `self` snake_case_ : Dict = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[Any] = [*signature.parameters.keys()] snake_case_ : Dict = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCAmelCase__
)] , lowerCAmelCase__ ) def _A ( self :int ) -> Any: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Union[str, Any] = True snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ ) snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ ) snake_case_ : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: snake_case_ : Any = True snake_case_ : Any = False snake_case_ : Dict = True snake_case_ : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.encoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) snake_case_ : Tuple = len(lowerCAmelCase__ ) snake_case_ : List[str] = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs:
correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # decoder attentions snake_case_ : Optional[int] = outputs.decoder_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions snake_case_ : List[Any] = outputs.cross_attentions self.assertIsInstance(lowerCAmelCase__ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine snake_case_ : Optional[int] = True snake_case_ : List[Any] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) ) snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _A ( self :Any ) -> Optional[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int: """simple docstring""" snake_case_ : List[str] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset"
) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location=__magic_name__ ) return batch @require_torch @slow class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : List[str] = prepare_batch() with torch.no_grad(): snake_case_ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] snake_case_ : Optional[int] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Tuple = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Any = torch.tensor( [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8,
1.2_9_7_0]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ ) snake_case_ : str = prepare_batch("val-batch.pt" ) with torch.no_grad(): snake_case_ : Optional[Any] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ ) snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
653
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device A_ = False class __lowerCamelCase ( unittest.TestCase ): pass @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): def UpperCAmelCase__ ( self ): lowerCamelCase_ = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) lowerCamelCase_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pipe( image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images lowerCamelCase_ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
29
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = RobertaTokenizer a__ = RobertaTokenizerFast a__ = True a__ = {'''cls_token''': '''<s>'''} def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ : int = {"unk_token": "<unk>"} snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> 
Optional[int]: '''simple docstring''' snake_case_ : int = "lower newer" snake_case_ : Tuple = "lower newer" return input_text, output_text def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : Dict = "lower newer" snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokens + [tokenizer.unk_token] snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _A ( self :str ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" ) snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode( "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _A ( self :List[Any] ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = self.get_tokenizer() snake_case_ : Tuple = "Encode this sequence." 
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Testing spaces after special tokens snake_case_ : List[Any] = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : List[str] = "Encode <mask> sequence" snake_case_ : List[Any] = "Encode <mask>sequence" snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : int = encoded.index(lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' pass def _A ( self :int ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Any = "A, <mask> AllenNLP sentence." snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _A ( self :int ) -> Tuple: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): snake_case_ : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , 
trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ ) def _A ( self :List[str] ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ : Any = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , 
add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Tuple = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Any = 
self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
653
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __a = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
'''simple docstring''' import math def __UpperCAmelCase ( __magic_name__ )-> bool: """simple docstring""" snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int: """simple docstring""" snake_case_ : Any = 0 snake_case_ : int = 0 snake_case_ : Union[str, Any] = 3 while True: snake_case_ : Any = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(__magic_name__ ): snake_case_ : Optional[Any] = int(__magic_name__ ) total_partitions += 1 if check_partition_perfect(__magic_name__ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(__magic_name__ ) integer += 1 if __name__ == "__main__": print(f'''{solution() = }''')
653
0
import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowerCamelCase_ : '''simple docstring''' def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ): SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = length SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa ) SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[int] ): return self.length def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ): return {"x": self.x[i], "y": self.y[i]} class lowerCamelCase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ): super().__init__() SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE_ = True def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ): if self.first_batch: print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" ) SCREAMING_SNAKE_CASE_ = False return x * self.a[0] + self.b[0] class lowerCamelCase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ): super().__init__() SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() ) SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() ) SCREAMING_SNAKE_CASE_ = True def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ): if self.first_batch: print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}" ) SCREAMING_SNAKE_CASE_ = False return x * self.a + self.b def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]: from datasets import load_dataset from transformers import AutoTokenizer SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' ) SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' ) SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )} def tokenize_function(__UpperCAmelCase : Optional[int] ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_ = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' ) if "label" in examples: SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE_ = datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(__UpperCAmelCase : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 ) SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 ) return train_dataloader, eval_dataloader
31
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger() @dataclass class A_ : """simple docstring""" a__ = 42 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int: '''simple docstring''' snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _A ( self :int ) -> List[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class A_ : """simple docstring""" a__ = 42 a__ = 42 a__ = 0 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : 
type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]: """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval() snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval() snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ ) snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) ) module_transfer(__magic_name__ ) assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one." 
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}''' print(__magic_name__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,) # we can use the convnext one snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,) print(F'''Pushed {checkpoint_name}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple: """simple docstring""" snake_case_ : List[str] = "imagenet-1k-id2label.json" snake_case_ : Optional[Any] = 1000 snake_case_ : List[Any] = (1, num_labels) snake_case_ : Optional[Any] = "huggingface/label-files" snake_case_ : Dict = num_labels snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) ) snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()} snake_case_ : Any = idalabel snake_case_ : List[Any] = {v: k for k, v in idalabel.items()} snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ ) snake_case_ : Optional[int] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( 
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), } if model_name: convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __lowerCamelCase : Tuple = parser.parse_args() __lowerCamelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
653
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json", "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json", "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json", "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json", "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json", "cl-tohoku/bert-base-japanese": 
"https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json", "cl-tohoku/bert-base-japanese-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json" ), "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json", # See all BERT models at https://huggingface.co/models?filter=bert } class __UpperCamelCase ( A__ ): __A : Union[str, Any] = """bert""" def __init__( self , _UpperCamelCase=30522 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ): super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type 
_UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class __UpperCamelCase ( A__ ): @property def UpperCamelCase( self ): if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
32
"""Configuration for the RoCBert model.

NOTE(review): the original chunk was name-mangled — every `__init__` parameter
was called `lowerCAmelCase__` (a SyntaxError), the base class `a_` was
undefined, and the module logger and the archive map shared one name. The
parameter names below are restored from the attribute assignments in the body.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class A_(PretrainedConfig):
    """Configuration class for RoCBert, a BERT variant that additionally embeds
    character pronunciation and shape information.

    Extra (non-BERT) knobs:
        enable_pronunciation / enable_shape: toggle the auxiliary embeddings.
        pronunciation_embed_dim / pronunciation_vocab_size: pronunciation table.
        shape_embed_dim / shape_vocab_size: glyph-shape table.
        concat_input: whether auxiliary embeddings are concatenated to the
            word embeddings (vs. summed) — presumably; confirm against the model.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # Plain attribute copies; assignment order mirrors the original block.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
653
0
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
33
"""Largest all-ones square in a binary matrix — four implementations.

Fixes over the original chunk:
- All four functions were mangled to one name (`__UpperCAmelCase`) with
  duplicate `__magic_name__` parameters (a SyntaxError), while `__main__`
  called `largest_square_area_in_matrix_bottom_up`; distinct names restored.
- The space-optimized version aliased ``next_row = current_row``, so the
  diagonal lookup read the row currently being written instead of the row
  below (e.g. it returned 2 for ``[[1, 1], [1, 0]]``; the answer is 1).
  Fixed by copying the row.
"""


def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list) -> int:
    """Plain (exponential) top-down recursion.

    ``largest_square_area`` is a one-element list so the nested helper can
    update the running maximum in place.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # Base case: walked off the grid — contributes a square of side 0.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Side of the largest square whose top-left corner is (row, col).
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list) -> int:
    """Top-down recursion with a memo table (``-1`` marks "not computed").

    >>> largest_square_area_in_matrix_top_down_with_memoization(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP over a (rows+1) x (cols+1) table (extra row/col of zeros
    stands in for the out-of-grid base case).

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP keeping only the current row and the row below it.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 0]])
    1
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias!) the finished row: aliasing would let this row's
        # writes clobber the "row below" values that the diagonal read needs.
        next_row = current_row[:]

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
653
0
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" UpperCamelCase = AutoConfig.from_pretrained(_lowercase ) UpperCamelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowercase ) UpperCamelCase = checkpoints.load_tax_checkpoint(_lowercase ) UpperCamelCase = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": UpperCamelCase = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": UpperCamelCase = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): UpperCamelCase = f'layers_{str(_lowercase )}' # Self-Attention UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: 
UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCamelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCamelCase = flax_model.params['''encoder''']['''block'''][str(_lowercase )]['''layer'''] UpperCamelCase = tax_attention_key UpperCamelCase = tax_attention_out UpperCamelCase = tax_attention_query UpperCamelCase = tax_attention_value UpperCamelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase = tax_global_layer_norm if split_mlp_wi: UpperCamelCase = tax_mlp_wi_a UpperCamelCase = tax_mlp_wi_a else: UpperCamelCase = tax_mlp_wi UpperCamelCase = tax_mlp_wo UpperCamelCase = tax_mlp_layer_norm UpperCamelCase = flax_model_encoder_layer_block # Only for layer 0: UpperCamelCase = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCamelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T UpperCamelCase = tax_encoder_global_rel_embedding # Assigning UpperCamelCase = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] UpperCamelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): UpperCamelCase = f'layers_{str(_lowercase )}' # Self-Attention UpperCamelCase = 
tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] UpperCamelCase = tax_enc_dec_attention_module['''key''']['''kernel'''] UpperCamelCase = tax_enc_dec_attention_module['''out''']['''kernel'''] UpperCamelCase = tax_enc_dec_attention_module['''query''']['''kernel'''] UpperCamelCase = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCamelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCamelCase = flax_model.params['''decoder''']['''block'''][str(_lowercase )]['''layer'''] UpperCamelCase = tax_attention_key UpperCamelCase = tax_attention_out UpperCamelCase = tax_attention_query UpperCamelCase = tax_attention_value UpperCamelCase = 
tax_pre_attention_layer_norm UpperCamelCase = tax_enc_dec_attention_key UpperCamelCase = tax_enc_dec_attention_out UpperCamelCase = tax_enc_dec_attention_query UpperCamelCase = tax_enc_dec_attention_value UpperCamelCase = tax_cross_layer_norm if split_mlp_wi: UpperCamelCase = tax_mlp_wi_a UpperCamelCase = tax_mlp_wi_a else: UpperCamelCase = tax_mlp_wi UpperCamelCase = tax_mlp_wo UpperCamelCase = txa_mlp_layer_norm UpperCamelCase = flax_model_decoder_layer_block # Decoder Normalization UpperCamelCase = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] UpperCamelCase = txa_decoder_norm # Only for layer 0: UpperCamelCase = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCamelCase = tax_decoder_rel_embedding # Token Embeddings UpperCamelCase = tax_model['''target''']['''token_embedder''']['''embedding'''] UpperCamelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: UpperCamelCase = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(_lowercase ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
34
"""Fetch artifacts of the last completed daily-CI workflow run.

NOTE(review): in the original chunk all four functions were mangled to one
name (`__UpperCAmelCase`) with duplicate `__magic_name__` parameters, while
the bodies called `get_daily_ci_runs` / `get_last_daily_ci_runs` /
`get_last_daily_ci_artifacts` — the names the code itself uses are restored.
"""
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the `num_runs` most recent runs of the scheduled (daily) CI workflow."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are newest-first, so the first completed one is the latest.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword name `get_artifacts_links` expects.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts and return them as {artifact_name: {member_filename: text}}.

    Artifacts that were not downloaded (missing link / missing zip) are
    silently absent from the result.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
653
0
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def a ( A__ ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = np.inf def set_batch_size(A__ ) -> None: nonlocal batch_size if isinstance(A__ , A__ ): SCREAMING_SNAKE_CASE__ : List[str] = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(A__ , A__ ): SCREAMING_SNAKE_CASE__ : Any = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(A__ , A__ ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE__ : Optional[Any] = min(A__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(A__ , A__ ) return None if batch_size is np.inf else batch_size class lowercase ( _UpperCAmelCase ): def __init__( self : Optional[Any] , _lowercase : NestedDataStructureLike[PathLike] , _lowercase : Optional[NamedSplit] = None , _lowercase : Optional[Features] = None , _lowercase : str = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[int] = None , **_lowercase : Tuple , ): super().__init__( _lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE__ : str = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE__ : Optional[int] = Parquet( cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , 
hash=_lowercase , **_lowercase , ) def lowercase__ ( self : Any ): # Build iterable dataset if self.streaming: SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : int = None SCREAMING_SNAKE_CASE__ : List[Any] = None self.builder.download_and_prepare( download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE__ : Any = self.builder.as_dataset( split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self : Dict , _lowercase : Dataset , _lowercase : Union[PathLike, BinaryIO] , _lowercase : Optional[int] = None , **_lowercase : Any , ): SCREAMING_SNAKE_CASE__ : List[str] = dataset SCREAMING_SNAKE_CASE__ : List[str] = path_or_buf SCREAMING_SNAKE_CASE__ : int = batch_size or get_writer_batch_size(dataset.features ) SCREAMING_SNAKE_CASE__ : str = parquet_writer_kwargs def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: SCREAMING_SNAKE_CASE__ : Any = self._write(file_obj=_lowercase , batch_size=_lowercase , **self.parquet_writer_kwargs ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(file_obj=self.path_or_buf , batch_size=_lowercase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : Optional[int] , _lowercase : BinaryIO , _lowercase : int , **_lowercase : str ): SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Any = parquet_writer_kwargs.pop('''path_or_buf''' , _lowercase ) SCREAMING_SNAKE_CASE__ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE__ : Any = 
pq.ParquetWriter(_lowercase , schema=_lowercase , **_lowercase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , _lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE__ : Optional[int] = query_table( table=self.dataset._data , key=slice(_lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(_lowercase ) written += batch.nbytes writer.close() return written
35
'''simple docstring''' from string import ascii_uppercase __lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)} __lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase)) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Tuple = len(__magic_name__ ) snake_case_ : str = 0 while True: if x == i: snake_case_ : List[str] = 0 if len(__magic_name__ ) == len(__magic_name__ ): break key += key[i] i += 1 return key def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : str = "" snake_case_ : List[Any] = 0 for letter in message: if letter == " ": cipher_text += " " else: snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Dict = "" snake_case_ : Dict = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def __UpperCAmelCase ( )-> None: """simple docstring""" snake_case_ : List[str] = "THE GERMAN ATTACK" snake_case_ : List[str] = "SECRET" snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ ) snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ ) print(F'''Encrypted Text = {s}''' ) print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
653
0
from math import pi def lowercase ( __A : int , __A : int ) -> float: '''simple docstring''' return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
36
'''simple docstring''' import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") snake_case_ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__magic_name__ ): os.makedirs(__magic_name__ ) snake_case_ : str = model.state_dict() def to_tf_var_name(__magic_name__ ): for patt, repl in iter(__magic_name__ ): snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ ) return F'''bert/{name}''' def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ): snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype ) snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__magic_name__ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ ) snake_case_ : Dict = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): snake_case_ : List[Any] = torch_tensor.T snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ ) tf.keras.backend.set_value(__magic_name__ ,__magic_name__ ) snake_case_ : List[str] = session.run(__magic_name__ ) print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' ) snake_case_ : Any = 
tf.train.Saver(tf.trainable_variables() ) saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) ) def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]: """simple docstring""" snake_case_ : Any = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" ) snake_case_ : Optional[int] = parser.parse_args(__magic_name__ ) snake_case_ : Optional[int] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,) convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name ) if __name__ == "__main__": main()
653
0
"""Evaluation script for SQuAD-v2.0-style predictions: exact-match / F1 with
optional no-answer probability thresholding and precision-recall plots.

NOTE(review): the original text of this block was mechanically obfuscated —
every function was named ``UpperCamelCase_`` (so each definition silently
replaced the previous one), the module globals ``ARTICLES_REGEX`` and ``OPTS``
that the code reads were missing, and the ``__main__`` guard called undefined
``parse_args``/``main``.  This rewrite restores one consistent set of names so
the script runs.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Matches English articles as whole words; stripped during answer normalization.
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
# Parsed CLI options; populated in the __main__ block below.
OPTS = None


def parse_args():
    """Parse CLI options; print help and exit when called with no arguments."""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to True iff it has at least one gold answer text."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    """Whitespace-tokenize a normalized answer; empty input gives []."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores(dataset, preds):
    """Per-question exact and F1 scores, taking the max over gold answers."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Replace a question's score when its no-answer probability exceeds the threshold."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicted "no answer": score 1 only if the question truly has none.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate exact/F1 scores (optionally over a subset of question ids)."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    """Copy ``new_eval`` entries into ``main_eval`` under ``prefix_``-qualified keys."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve (requires matplotlib's pyplot as plt)."""
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Average precision over the ranking induced by no-answer probability."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute and plot PR curves for exact match, F1, and the has-answer oracle."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question subset."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer thresholds and return (best score, best threshold)."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Unanswerable: a non-empty prediction costs a point, empty is neutral.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best-threshold exact and F1 results into ``main_eval``."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh


def main():
    """Load data/predictions per OPTS, compute metrics, write or print them."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
37
'''simple docstring'''
# NOTE(review): obfuscated copy of a "hash table with linked list" structure.
# The obfuscation broke it in several ways, so it cannot run as written:
#   * the base class is the undefined name ``a_`` (presumably ``HashTable``
#     from the import below — TODO confirm),
#   * all three non-dunder methods are named ``_A``, so later definitions
#     silently replace earlier ones and the parent never finds the intended
#     override names,
#   * two signatures repeat the parameter name ``lowerCAmelCase__`` (a
#     SyntaxError), and bodies read ``key``, which is never bound.
# Documented as-is; restoring it safely would require the upstream source.
from collections import deque
from .hash_table import HashTable


class A_ (a_ ):
    """simple docstring"""

    def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
        '''simple docstring'''
        # Forward all positional/keyword arguments to the parent constructor.
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )

    def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        # Presumably the value-setter: prepend the new value to the deque kept
        # in this bucket, creating the deque on first use — TODO confirm;
        # ``key`` is unbound here after the obfuscation.
        snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(lowerCAmelCase__ )
        snake_case_ : Tuple = self.values[key]

    def _A ( self :int ) -> Dict:
        '''simple docstring'''
        # Presumably a load/balance factor: average remaining capacity per
        # slot scaled by the charge factor — TODO confirm (``lowerCAmelCase__``
        # is unbound inside the generator).
        return (
            sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
        '''simple docstring'''
        # Presumably collision resolution: keep this bucket unless its deque
        # is full and the value is absent; otherwise defer to the parent's
        # ``_collision_resolution`` — TODO confirm.
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
        ):
            return key
        return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
653
0
'''simple docstring'''
# NOTE(review): this block is a mechanically obfuscated copy of an "Audio"
# feature type — a dataclass backed by a pyarrow struct {"bytes", "path"} with
# encode/decode/flatten/cast/embed methods.  The obfuscation broke it:
#   * the three module flags are all named ``A_`` (only the last binding
#     survives),
#   * every dataclass field is named ``lowerCamelCase__`` (only the last
#     survives), yet methods read ``self.decode`` / ``self.mono`` /
#     ``self.sampling_rate`` / ``self.pa_type``; the ``field(...)`` default
#     also references the undefined ``__SCREAMING_SNAKE_CASE``,
#   * every method is named ``__UpperCamelCase`` and two signatures repeat the
#     parameter name ``__SCREAMING_SNAKE_CASE`` (a SyntaxError),
#   * bodies assign to ``snake_case__`` but read the original local names
#     (``value``, ``buffer``, ``path``, ``file``, ``array``, ...), which are
#     unbound here; ``np.intaa``/``np.floataa`` are garbled dtypes; one branch
#     references the missing class name ``Audio``.
# The code is documented as-is rather than rewritten; restoring it safely
# would require the upstream source.
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType

# Presumably three codec-capability flags — TODO confirm; all three share one name.
A_ , A_ , A_ : Union[str, Any] = False, False, False


@dataclass
class __snake_case :
    '''simple docstring'''

    # Fields (all collapsed to one name); the method bodies suggest they were:
    # sampling_rate, mono, decode, id, dtype, pa_type, _type — TODO confirm.
    lowerCamelCase__ = None
    lowerCamelCase__ = True
    lowerCamelCase__ = True
    lowerCamelCase__ = None
    # Automatically constructed
    lowerCamelCase__ = "dict"
    lowerCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    lowerCamelCase__ = field(default='''Audio''' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )

    def __call__( self ):
        # Arrow storage type of this feature.
        return self.pa_type

    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        # Presumably encode_example: normalize a str path / raw bytes /
        # {"array", "sampling_rate"} dict into the {"bytes", "path"} storage
        # form — TODO confirm; many locals below are unbound after obfuscation.
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            return {"bytes": None, "path": value}
        elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            snake_case__ : Tuple = BytesIO()
            sf.write(__SCREAMING_SNAKE_CASE , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("""pcm""" ):
                # "PCM" only has raw audio bytes
                if value.get("""sampling_rate""" ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
                if value.get("""bytes""" ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    snake_case__ : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
                else:
                    snake_case__ : Tuple = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_2_7_6_7
                snake_case__ : str = BytesIO(bytes() )
                sf.write(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , value["""sampling_rate"""] , format="""wav""" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )

    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
        # Presumably decode_example: read the stored bytes/path with
        # soundfile/librosa, optionally down-mix to mono and resample — TODO
        # confirm; ``value``/``path``/``file``/``array`` etc. are unbound here.
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        snake_case__ , snake_case__ : str = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        snake_case__ : Optional[Any] = xsplitext(__SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
                """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`.
""" )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
                """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`.
""" )
        if file is None:
            # Streaming case: resolve a per-repo auth token from the URL.
            snake_case__ : str = token_per_repo_id or {}
            snake_case__ : Tuple = path.split("""::""" )[-1]
            try:
                snake_case__ : str = string_to_dict(__SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["""repo_id"""]
                snake_case__ : int = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                snake_case__ : Dict = None
            with xopen(__SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=__SCREAMING_SNAKE_CASE ) as f:
                snake_case__ , snake_case__ : Optional[int] = sf.read(__SCREAMING_SNAKE_CASE )
        else:
            snake_case__ , snake_case__ : Tuple = sf.read(__SCREAMING_SNAKE_CASE )
        snake_case__ : str = array.T
        if self.mono:
            snake_case__ : str = librosa.to_mono(__SCREAMING_SNAKE_CASE )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            snake_case__ : List[Any] = librosa.resample(__SCREAMING_SNAKE_CASE , orig_sr=__SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
            snake_case__ : List[str] = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def __UpperCamelCase ( self ):
        # Presumably flatten: expose the raw storage columns; only valid when
        # decoding is disabled.
        from .features import Value

        if self.decode:
            raise ValueError("""Cannot flatten a decoded Audio feature.""" )
        return {
            "bytes": Value("""binary""" ),
            "path": Value("""string""" ),
        }

    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        # Presumably cast_storage: coerce string/binary/struct arrow storage
        # into the canonical {"bytes", "path"} struct — TODO confirm;
        # ``storage``/``bytes_array``/``path_array`` are unbound after
        # obfuscation and ``Audio`` is not defined in this block.
        if pa.types.is_string(storage.type ):
            snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
            snake_case__ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
            snake_case__ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
            snake_case__ : Dict = pa.array([Audio().encode_example(__SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                snake_case__ : Tuple = storage.field("""bytes""" )
            else:
                snake_case__ : Any = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                snake_case__ : List[Any] = storage.field("""path""" )
            else:
                snake_case__ : Union[str, Any] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
            snake_case__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )

    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        # Presumably embed_storage: inline file contents as bytes (keeping only
        # the basename as path) so the table is self-contained — TODO confirm.
        @no_op_if_value_is_null
        def path_to_bytes(__SCREAMING_SNAKE_CASE ):
            with xopen(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
                snake_case__ : int = f.read()
            return bytes_

        snake_case__ : Optional[int] = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        snake_case__ : Optional[Any] = pa.array(
            [os.path.basename(__SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        snake_case__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
38
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __lowerCamelCase : Dict = TypeVar('''KEY''') __lowerCamelCase : int = TypeVar('''VAL''') @dataclass(frozen=a_ , slots=a_ ) class A_ (Generic[KEY, VAL] ): """simple docstring""" a__ = 42 a__ = 42 class A_ (_Item ): """simple docstring""" def __init__( self :List[Any] ) -> None: '''simple docstring''' super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __bool__( self :Optional[int] ) -> bool: '''simple docstring''' return False __lowerCamelCase : Dict = _DeletedItem() class A_ (MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None: '''simple docstring''' snake_case_ : Any = initial_block_size snake_case_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case_ : Tuple = capacity_factor snake_case_ : List[Any] = 0 def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int: '''simple docstring''' return hash(lowerCAmelCase__ ) % len(self._buckets ) def _A ( self :Any , lowerCAmelCase__ :int ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool: '''simple docstring''' snake_case_ : Optional[int] = self._buckets[ind] if not stored: snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) self._len += 1 return True elif stored.key == key: snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ ) return True else: return False def _A ( self :int ) -> bool: '''simple docstring''' snake_case_ : Any = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCAmelCase__ ) def _A ( self :Any ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False snake_case_ : Optional[int] = len(self._buckets ) * 
self._capacity_factor / 2 return len(self ) < limit def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None: '''simple docstring''' snake_case_ : Tuple = self._buckets snake_case_ : int = [None] * new_size snake_case_ : Any = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _A ( self :Optional[int] ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def _A ( self :str ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]: '''simple docstring''' snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ ) for _ in range(len(self._buckets ) ): yield ind snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): break def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCAmelCase__ , lowerCAmelCase__ ) def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : int = self._buckets[ind] if item is None: raise KeyError(lowerCAmelCase__ ) if item is _deleted: continue if item.key == key: snake_case_ : List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL: '''simple docstring''' for ind in self._iterate_buckets(lowerCAmelCase__ ): snake_case_ : Optional[Any] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCAmelCase__ ) def __len__( self :Optional[Any] ) -> int: '''simple docstring''' return self._len def 
__iter__( self :List[Any] ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self :Any ) -> str: '''simple docstring''' snake_case_ : Dict = " ,".join( F'''{item.key}: {item.val}''' for item in self._buckets if item ) return F'''HashMap({val_string})'''
653
0
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    """Return the maximum product over all contiguous sub-arrays of the input.

    Returns 0 for an empty input; raises ValueError when the input is not a
    list/tuple of ints.

    NOTE(review): the original body was obfuscated — the three running values
    (max-ending-here, min-ending-here, best-so-far) were all collapsed into a
    single name, the swap read undefined locals, and the per-element
    ``isinstance`` check tested the wrong operands, so the function could not
    run.  This restores the standard Kadane-style max-product algorithm while
    keeping the original signature.
    """
    numbers = SCREAMING_SNAKE_CASE__
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    # Track both the max and the min product of sub-arrays ending at each
    # index: a negative element can flip the min into the new max.
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        number = numbers[i]
        if number < 0:
            # Multiplying by a negative swaps which running product is extreme.
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod


# Readable, importable alias (dunder-prefixed names are skipped by
# ``from module import *``).
max_product_subarray = __SCREAMING_SNAKE_CASE
39
"""GPTBigCode model configuration.

NOTE(review): the obfuscated original bound every ``__init__`` argument to a
throwaway local named ``snake_case_`` instead of setting it on ``self``, so a
constructed config carried none of its values; the class attributes were also
all named ``a__``.  This restores the ``self.<attr> = <arg>`` assignments
(attribute names follow the visible ``attribute_map``) and keeps ``A_`` as a
backward-compatible alias.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class for GPTBigCode models.

    Stores vocabulary/positional sizes, transformer dimensions, dropout
    probabilities, and attention-softmax precision flags; unrecognized keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Canonical config names mapped onto this model's GPT-2-style attributes.
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


# Backward-compatible alias for the obfuscated public name.
A_ = GPTBigCodeConfig
653
0
"""GLUE benchmark metric for the `datasets` library."""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy plus binary F1 (used for MRPC and QQP)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation coefficients (used for STS-B)."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    """GLUE metric: dispatches to the right scorer for each GLUE subset."""

    def _info(self):
        # `datasets.Metric` requires this exact method name.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # STS-B is a regression task; every other subset uses int labels.
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        # `datasets.Metric.compute` delegates to this exact method name.
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )


# Backward-compatible alias for the previous (auto-generated) class name.
lowerCAmelCase_ = Glue
40
"""Utilities for the distillation training scripts: git logging, GPU/distributed
setup, and RNG seeding."""
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

logger = logging.getLogger(__name__)


def git_log(folder_path):
    """Dump the current git commit/branch info to ``<folder_path>/git_log.json``.

    Args:
        folder_path: existing directory the JSON file is written into.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setup.

    Mutates ``params`` in place: sets rank/world-size fields, ``is_master``,
    ``multi_gpu``/``multi_node``, selects the CUDA device, and initializes the
    NCCL process group when running distributed.
    """
    if params.n_gpu <= 0:
        # CPU-only run: nothing to initialize.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        # Distributed launch: topology comes from the environment variables
        # exported by the launcher.
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Seed numpy and torch (and all CUDA devices when GPUs are used)."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
653
0
"""Convert EfficientNet checkpoints from the TensorFlow (Keras) implementation
to the HuggingFace format, verify the outputs match, and optionally save/push."""
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras constructors for each EfficientNet variant.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Architecture hyper-parameters per variant.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    """Build an `EfficientNetConfig` for *model_name* ('b0'..'b7') with ImageNet labels."""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    """Create the HF image processor matching the variant's input resolution."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    """Build a TF-parameter-name -> HF-parameter-name mapping for the checkpoint."""
    # Block ids appearing in the TF names, e.g. "2a" from "block2a_expand_conv/...".
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # NOTE(review): classifier key names reconstructed from the Keras head
    # layer name ("predictions") — confirm against an actual checkpoint.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF parameter values into the HF state dict in place, transposing as needed."""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # HWIO -> OIHW for regular convolutions.
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Load the Keras model, port its weights into a HF model, and verify outputs match."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
41
"""Tests for the YOLOS image processor."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    """Holds the knobs used to build a `YolosImageProcessor` plus helpers shared by the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default, use the processor's default size dict.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output.

        Unbatched: the shorter side is resized to ``size["shortest_edge"]``,
        keeping the aspect ratio. Batched: the per-image expectations are
        computed and the maximum along each dimension is returned (padding).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
653
0
'''simple docstring''' import logging import os from .state import PartialState class UpperCAmelCase ( logging.LoggerAdapter ): '''simple docstring''' @staticmethod def UpperCamelCase( SCREAMING_SNAKE_CASE_ ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: '''simple docstring''' if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) lowerCamelCase_ = kwargs.pop('main_process_only' , SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = kwargs.pop('in_order' , SCREAMING_SNAKE_CASE_ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE_ ): if self._should_log(SCREAMING_SNAKE_CASE_ ): lowerCamelCase_ ,lowerCamelCase_ = self.process(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.logger.log(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) elif in_order: lowerCamelCase_ = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowerCamelCase_ ,lowerCamelCase_ = self.process(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.logger.log(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) state.wait_for_everyone() def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase = None ) -> Optional[int]: if log_level is None: lowerCamelCase_ = os.environ.get('ACCELERATE_LOG_LEVEL' ,__UpperCamelCase ) lowerCamelCase_ = logging.getLogger(__UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(__UpperCamelCase ,{} )
42
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" if not isinstance(__magic_name__ ,__magic_name__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) snake_case_ : Dict = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__magic_name__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
653
0
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE ) lowercase__ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE ) lowercase__ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": lowercase__ = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": lowercase__ = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): lowercase__ = f'layers_{str(SCREAMING_SNAKE_CASE )}' # Self-Attention lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: 
lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowercase__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowercase__ = flax_model.params['''encoder''']['''block'''][str(SCREAMING_SNAKE_CASE )]['''layer'''] lowercase__ = tax_attention_key lowercase__ = tax_attention_out lowercase__ = tax_attention_query lowercase__ = tax_attention_value lowercase__ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ = tax_global_layer_norm if split_mlp_wi: lowercase__ = tax_mlp_wi_a lowercase__ = tax_mlp_wi_a else: lowercase__ = tax_mlp_wi lowercase__ = tax_mlp_wo lowercase__ = tax_mlp_layer_norm lowercase__ = flax_model_encoder_layer_block # Only for layer 0: lowercase__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T lowercase__ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": lowercase__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T lowercase__ = tax_encoder_global_rel_embedding # Assigning lowercase__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] lowercase__ = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): lowercase__ = f'layers_{str(SCREAMING_SNAKE_CASE )}' # Self-Attention lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] lowercase__ = 
tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] lowercase__ = tax_enc_dec_attention_module['''key''']['''kernel'''] lowercase__ = tax_enc_dec_attention_module['''out''']['''kernel'''] lowercase__ = tax_enc_dec_attention_module['''query''']['''kernel'''] lowercase__ = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization lowercase__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning lowercase__ = flax_model.params['''decoder''']['''block'''][str(SCREAMING_SNAKE_CASE )]['''layer'''] lowercase__ = tax_attention_key lowercase__ = tax_attention_out lowercase__ = tax_attention_query lowercase__ = tax_attention_value lowercase__ = tax_pre_attention_layer_norm lowercase__ = tax_enc_dec_attention_key lowercase__ = tax_enc_dec_attention_out lowercase__ = tax_enc_dec_attention_query lowercase__ = 
tax_enc_dec_attention_value lowercase__ = tax_cross_layer_norm if split_mlp_wi: lowercase__ = tax_mlp_wi_a lowercase__ = tax_mlp_wi_a else: lowercase__ = tax_mlp_wi lowercase__ = tax_mlp_wo lowercase__ = txa_mlp_layer_norm lowercase__ = flax_model_decoder_layer_block # Decoder Normalization lowercase__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] lowercase__ = txa_decoder_norm # Only for layer 0: lowercase__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T lowercase__ = tax_decoder_rel_embedding # Token Embeddings lowercase__ = tax_model['''target''']['''token_embedder''']['''embedding'''] lowercase__ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: lowercase__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(SCREAMING_SNAKE_CASE ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) lowerCAmelCase = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
43
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase : Tuple = 16 __lowerCamelCase : Optional[int] = 32 def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> int: """simple docstring""" snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" ) snake_case_ : str = load_dataset("glue" ,"mrpc" ) def tokenize_function(__magic_name__ ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with 
accelerator.main_process_first(): snake_case_ : Any = datasets.map( __magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : List[Any] = tokenized_datasets.rename_column("label" ,"labels" ) def collate_fn(__magic_name__ ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ : Tuple = 16 elif accelerator.mixed_precision != "no": snake_case_ : str = 8 else: snake_case_ : Optional[Any] = None return tokenizer.pad( __magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,) # Instantiate dataloaders. snake_case_ : str = DataLoader( tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ ) snake_case_ : Optional[Any] = DataLoader( tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1": snake_case_ : List[str] = 2 # Initialize accelerator snake_case_ : Union[str, Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : List[str] = config["lr"] snake_case_ : Dict = 
int(config["num_epochs"] ) snake_case_ : Dict = int(config["seed"] ) snake_case_ : Optional[int] = int(config["batch_size"] ) snake_case_ : Dict = evaluate.load("glue" ,"mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__magic_name__ ) def inner_training_loop(__magic_name__ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__magic_name__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer snake_case_ : List[Any] = AdamW(params=model.parameters() ,lr=__magic_name__ ) snake_case_, snake_case_ : int = get_dataloaders(__magic_name__ ,__magic_name__ ) # Instantiate scheduler snake_case_ : Tuple = get_linear_schedule_with_warmup( optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = accelerator.prepare( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) # Now we train the model for epoch in range(__magic_name__ ): model.train() for step, batch in enumerate(__magic_name__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) snake_case_ : int = model(**__magic_name__ ) snake_case_ : Any = outputs.loss accelerator.backward(__magic_name__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__magic_name__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : Union[str, Any] = model(**__magic_name__ ) snake_case_ : List[str] = outputs.logits.argmax(dim=-1 ) snake_case_, snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=__magic_name__ ,references=__magic_name__ ,) snake_case_ : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" ,type=__magic_name__ ,default=__magic_name__ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." ,) parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." 
) snake_case_ : str = parser.parse_args() snake_case_ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(__magic_name__ ,__magic_name__ ) if __name__ == "__main__": main()
653
0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str]=False ): """simple docstring""" try: _lowerCamelCase : Tuple = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _lowerCamelCase : str = default else: # KEY is set, convert it to True or False. try: _lowerCamelCase : Optional[int] = strtobool(_lowerCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'If set, {key} must be yes or no.' 
) return _value UpperCAmelCase_ : Any = parse_flag_from_env('RUN_SLOW', default=False) def A_ ( _lowerCAmelCase : List[Any] ): """simple docstring""" return unittest.skip("Test was skipped" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : str ): """simple docstring""" return unittest.skipUnless(_run_slow_tests , "test is slow" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Union[str, Any] ): """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 , 
"test requires multiple GPUs" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[int]=None ): """simple docstring""" if test_case is None: return partial(_lowerCAmelCase , version=_lowerCAmelCase ) return unittest.skipUnless(is_torch_version(">=" , _lowerCAmelCase ) , F'test requires torch version >= {version}' )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_lowerCAmelCase ) UpperCAmelCase_ : Optional[Any] = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCAmelCase ) class UpperCAmelCase__ ( unittest.TestCase ): lowerCAmelCase_ = True 
@classmethod def lowerCamelCase_ ( cls : Any ): _lowerCamelCase : List[str] = tempfile.mkdtemp() @classmethod def lowerCamelCase_ ( cls : Tuple ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def lowerCamelCase_ ( self : Optional[int] ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__A ) class UpperCAmelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class UpperCAmelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self : int,__A : Union[mock.Mock, List[mock.Mock]] ): _lowerCamelCase : Tuple = mocks if isinstance(__A,(tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def A_ ( _lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : Tuple = AcceleratorState() _lowerCamelCase : str = tensor[None].clone().to(state.device ) _lowerCamelCase : List[Any] = gather(_lowerCAmelCase ).cpu() _lowerCamelCase : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _lowerCAmelCase ): return False return True class UpperCAmelCase__ : def __init__( self : int,__A : Any,__A : List[Any],__A : str ): _lowerCamelCase : Tuple = returncode _lowerCamelCase : List[str] = stdout _lowerCamelCase : Any = stderr async def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : int ): """simple docstring""" while True: _lowerCamelCase : Optional[Any] = await stream.readline() if line: callback(_lowerCAmelCase ) else: break async def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[int]=False ): """simple docstring""" if echo: print("\nRunning: " , " ".join(_lowerCAmelCase ) ) _lowerCamelCase : List[str] = 
await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _lowerCamelCase : List[Any] = [] _lowerCamelCase : List[str] = [] def tee(_lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int="" ): _lowerCamelCase : Optional[Any] = line.decode("utf-8" ).rstrip() sink.append(_lowerCAmelCase ) if not quiet: print(_lowerCAmelCase , _lowerCAmelCase , file=_lowerCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stdout , label="stdout:" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stderr , label="stderr:" ) ) ), ] , timeout=_lowerCAmelCase , ) return _RunOutput(await p.wait() , _lowerCAmelCase , _lowerCAmelCase ) def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]=180 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=True ): """simple docstring""" _lowerCamelCase : Dict = asyncio.get_event_loop() _lowerCamelCase : List[Any] = loop.run_until_complete( _stream_subprocess(_lowerCAmelCase , env=_lowerCAmelCase , stdin=_lowerCAmelCase , timeout=_lowerCAmelCase , quiet=_lowerCAmelCase , echo=_lowerCAmelCase ) ) _lowerCamelCase : List[str] = " 
".join(_lowerCAmelCase ) if result.returncode > 0: _lowerCamelCase : int = "\n".join(result.stderr ) raise RuntimeError( F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' F'The combined stderr from workers follows:\n{stderr}' ) return result class UpperCAmelCase__ ( A ): pass def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple=False ): """simple docstring""" try: _lowerCamelCase : Optional[Any] = subprocess.check_output(_lowerCAmelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_lowerCAmelCase , "decode" ): _lowerCamelCase : List[str] = output.decode("utf-8" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F'Command `{" ".join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}' ) from e
44
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class A_ (a_ ): """simple docstring""" a__ = '''facebook/bart-large-mnli''' a__ = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) a__ = '''text_classifier''' a__ = AutoTokenizer a__ = AutoModelForSequenceClassification a__ = ['''text''', ['''text''']] a__ = ['''text'''] def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' super().setup() snake_case_ : Optional[int] = self.model.config snake_case_ : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): snake_case_ : Union[str, Any] = int(lowerCAmelCase__ ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." 
) def _A ( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> int: '''simple docstring''' snake_case_ : Tuple = labels return self.pre_processor( [text] * len(lowerCAmelCase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , ) def _A ( self :Any , lowerCAmelCase__ :str ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = outputs.logits snake_case_ : Tuple = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
653
0
"""Convert original SwiftFormer checkpoints to the HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the reference first-five logits for each known checkpoint.

    Raises:
        ValueError: for an unrecognized checkpoint name (instead of silently
        returning None, which would crash later in ``torch.allclose``).
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
    raise ValueError(f"Unknown SwiftFormer name: {swiftformer_name}")


def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` inside `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Return (old_key, new_key) pairs mapping original names to HF names."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Load an original checkpoint, rename its keys, verify logits, and save the HF model."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if not original_ckpt:
        # Fail with a clear message instead of a NameError further down.
        raise ValueError("Please provide --original_ckpt (path or URL to the original checkpoint).")
    if original_ckpt.startswith("https"):
        checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
    else:
        checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
45
"""Lazy-import module for ViT: registers submodules per available backend."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Map submodule name -> exported names; entries are added only when the
# corresponding optional backend (vision/torch/tf/flax) is installed.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
653
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class A_ ( metaclass=_a ): lowerCAmelCase__ = ['transformers', 'torch', 'note_seq'] def __init__( self: Union[str, Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["transformers", "torch", "note_seq"] ) @classmethod def _lowercase ( cls: Any ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Dict ): '''simple docstring''' requires_backends(cls ,["transformers", "torch", "note_seq"] ) @classmethod def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: str ): '''simple docstring''' requires_backends(cls ,["transformers", "torch", "note_seq"] )
46
"""Tests for the LayoutLMv3 model: shape checks on tiny random configs plus a slow integration test."""
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        LayoutLMvaForQuestionAnswering,
        LayoutLMvaForSequenceClassification,
        LayoutLMvaForTokenClassification,
        LayoutLMvaModel,
    )
    from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class LayoutLMvaModelTester:
    """Builds tiny random configs/inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens +
        # number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1) by swapping
        # coordinate pairs that come out of the random draw reversed.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped for this model.
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adapt the common inputs (and optional labels) to `model_class`."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for emb_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = emb_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the checked-in COCO fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
0
"""RoBERTa model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration holding the hyper-parameters of a RoBERTa model."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
47
"""Tests for the joblib-spark parallel backend integration of ``map_nested``."""
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    # Each structure below should come back with every leaf incremented.
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
653
0
"""Deprecated OwlViT feature extractor (alias of OwlViTImageProcessor)."""
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class A(OwlViTImageProcessor):
    """Backward-compatibility shim: behaves exactly like
    ``OwlViTImageProcessor`` but warns that the old class is deprecated."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
48
"""ESM model configuration (optionally wrapping an ESMFold folding trunk)."""
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for ESM language models; when ``is_folding_model`` is
    set it additionally carries an :class:`EsmFoldConfig` and a vocab list."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested EsmFoldConfig into a plain dict."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    # Name of the underlying ESM model; presumably resolved by the folding
    # model code — TODO confirm against the modeling file.
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a ready TrunkConfig or its dict form (e.g. from JSON).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested TrunkConfig into a plain dict."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # The state dims must divide evenly into heads of the configured width
        # (the original checked `x % x`, which is always 0 and caught nothing).
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
653
0
"""PyTest suite for the ``digital_image_processing`` package.

Each test loads the bundled Lena sample image and asserts that the
corresponding filter/algorithm produces a non-trivial (non-zero) result.

Fixes applied: ``cv2``/``uint8`` imports were mis-spelled (``cva``,
``uinta``), the shared fixtures lost their names, and every test was
named ``lowercase__`` so each definition shadowed the previous one and
pytest could only ever collect the last test.
"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: colour and grayscale versions of the small Lena image.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as pil_img:
        # Work around assertion for response
        assert str(cc.change_contrast(pil_img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
49
"""Lazy-import structure for the Longformer model family.

Fixes applied: ``_import_structure`` was only ever assigned to a mangled
name, so the optional-dependency branches and the final ``_LazyModule``
call raised ``NameError``; the lazy module is now also installed into
``sys.modules`` so attribute access actually goes through it.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Mapping of submodule name -> public names it exports; extended below as
# optional backends are detected.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
653
0
"""Fast (Rust-backed) tokenizer for ELECTRA checkpoints.

Fixes applied: ``__init__`` and the helper methods declared several
parameters with the same mangled name (a ``SyntaxError``), the base class
and the ``normalizers`` module were referenced by unbound names, and all
three public methods shared one name so only the last survived.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class UpperCamelCase__(PreTrainedTokenizerFast):
    """WordPiece-based fast tokenizer for ELECTRA (same scheme as BERT)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees
        # with the options the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
50
"""Tests for the Autoformer time-series model.

Fixes applied: the obfuscation collapsed distinct local names into
``snake_case_`` (discarding tuple-unpacked values and then referencing the
real names — a ``NameError``), renamed the tester class away from the
``AutoformerModelTester`` name `setUp` references, mangled the mixin base
classes to ``a_``, and renamed every method to ``_A`` so unittest could not
discover any test.
"""
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    """Builds small Autoformer configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        # The decoder consumes the label window plus the prediction window.
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        # Past window must cover the context plus the largest lag.
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    """Download a pre-serialized batch of the tourism-monthly dataset."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        # Sampling-based prediction: compare the mean path with a loose rtol.
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
653
0
'''simple docstring''' from pathlib import Path import fire def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> int: """simple docstring""" UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ ) dest_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) for path in src_dir.iterdir(): UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n] UpperCAmelCase = dest_dir.joinpath(path.name ) print(SCREAMING_SNAKE_CASE_ ) dest_path.open('''w''' ).write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": fire.Fire(minify)
51
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = RobertaTokenizer a__ = RobertaTokenizerFast a__ = True a__ = {'''cls_token''': '''<s>'''} def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ : int = {"unk_token": "<unk>"} snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> 
Optional[int]: '''simple docstring''' snake_case_ : int = "lower newer" snake_case_ : Tuple = "lower newer" return input_text, output_text def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : Dict = "lower newer" snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokens + [tokenizer.unk_token] snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _A ( self :str ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" ) snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode( "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _A ( self :List[Any] ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = self.get_tokenizer() snake_case_ : Tuple = "Encode this sequence." 
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ) snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Testing spaces after special tokens snake_case_ : List[Any] = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : List[str] = "Encode <mask> sequence" snake_case_ : List[Any] = "Encode <mask>sequence" snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : int = encoded.index(lowerCAmelCase__ ) snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ ) snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' pass def _A ( self :int ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in 
self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Any = "A, <mask> AllenNLP sentence." snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _A ( self :int ) -> Tuple: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): snake_case_ : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , 
trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ ) self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ ) def _A ( self :List[str] ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ : Any = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , 
add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Tuple = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Any = 
self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , ) snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ ) snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
653
0
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def __A ( ) -> Dict: __a : Optional[int] = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=a_ , default=a_ , required=a_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=a_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=a_ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=a_ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=a_ , default=0 , help='''cuda_id.''' , ) __a : Any = parser.parse_args() return args def __A ( a_ :List[str] , a_ :Dict , a_ :int) -> List[Any]: if not len(a_) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''') __a , __a : Union[str, Any] = imgs[0].size __a : List[str] = Image.new('''RGB''' , size=(cols * w, rows * h)) __a , __a : Optional[int] = grid.size for i, img in enumerate(a_): grid.paste(a_ , box=(i % cols * w, i // cols * h)) return grid def __A ( a_ :int , a_ :Dict="robotic cat with wings" , a_ :Any=7.5 , a_ :Optional[Any]=50 , a_ :Optional[int]=1 , a_ :int=42 , ) -> List[str]: __a : Any = torch.Generator(pipeline.device).manual_seed(a_) __a : Optional[Any] = pipeline( a_ , guidance_scale=a_ , num_inference_steps=a_ , generator=a_ , num_images_per_prompt=a_ , ).images __a : Dict = int(math.sqrt(a_)) __a : Dict = image_grid(a_ , rows=_rows , cols=num_images_per_prompt // _rows) return grid, images A = parse_args() # Load models and create wrapper for 
stable diffusion A = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') A = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') A = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') A = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') A = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) A = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): A = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: A = unet.to(torch.device('''cuda''', args.cuda_id)) A = pipeline.to(unet.device) A , A = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) A = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
52
'''simple docstring''' import math def __UpperCAmelCase ( __magic_name__ )-> bool: """simple docstring""" snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int: """simple docstring""" snake_case_ : Any = 0 snake_case_ : int = 0 snake_case_ : Union[str, Any] = 3 while True: snake_case_ : Any = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(__magic_name__ ): snake_case_ : Optional[Any] = int(__magic_name__ ) total_partitions += 1 if check_partition_perfect(__magic_name__ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(__magic_name__ ) integer += 1 if __name__ == "__main__": print(f'''{solution() = }''')
653
0
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Optional[Any] ): assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory', [False, True] ) def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : int ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize( 'features', [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ], ) def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any] ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Any ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = 
TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_, split=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type', [str, list] ) def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict ): if issubclass(lowerCAmelCase_, lowerCAmelCase_ ): __lowerCAmelCase = text_path elif issubclass(lowerCAmelCase_, lowerCAmelCase_ ): __lowerCAmelCase = [text_path] __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_dataset(lowerCAmelCase_, lowerCAmelCase_ ) def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int, lowerCAmelCase_ : Tuple=("train",) ): assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory', [False, True] ) def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ): __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = TextDatasetReader({'train': text_path}, cache_dir=lowerCAmelCase_, keep_in_memory=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize( 'features', [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ], ) def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ): __lowerCAmelCase = tmp_path / 'cache' # CSV file loses col_1 string dtype information: default now is "int64" instead of 
"string" __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = TextDatasetReader({'train': text_path}, features=lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_ ) @pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int] ): if split: __lowerCAmelCase = {split: text_path} else: __lowerCAmelCase = 'train' __lowerCAmelCase = {'train': text_path, 'test': text_path} __lowerCAmelCase = tmp_path / 'cache' __lowerCAmelCase = {'text': 'string'} __lowerCAmelCase = TextDatasetReader(lowerCAmelCase_, cache_dir=lowerCAmelCase_ ).read() _check_text_datasetdict(lowerCAmelCase_, lowerCAmelCase_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
53
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger() @dataclass class A_ : """simple docstring""" a__ = 42 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int: '''simple docstring''' snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _A ( self :int ) -> List[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class A_ : """simple docstring""" a__ = 42 a__ = 42 a__ = 0 a__ = field(default_factory=a_ ) a__ = field(default_factory=a_ ) def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : 
type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]: """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval() snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval() snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ ) snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) ) module_transfer(__magic_name__ ) assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one." 
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}''' print(__magic_name__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,) # we can use the convnext one snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,) print(F'''Pushed {checkpoint_name}''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple: """simple docstring""" snake_case_ : List[str] = "imagenet-1k-id2label.json" snake_case_ : Optional[Any] = 1000 snake_case_ : List[Any] = (1, num_labels) snake_case_ : Optional[Any] = "huggingface/label-files" snake_case_ : Dict = num_labels snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) ) snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()} snake_case_ : Any = idalabel snake_case_ : List[Any] = {v: k for k, v in idalabel.items()} snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ ) snake_case_ : Optional[int] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( 
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ), } if model_name: convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __lowerCamelCase : Tuple = parser.parse_args() __lowerCamelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
653
0
from __future__ import annotations from typing import Any def a__ ( lowercase__ ): '''simple docstring''' if not postfix_notation: return 0 UpperCAmelCase_ ={"+", "-", "*", "/"} UpperCAmelCase_ =[] for token in postfix_notation: if token in operations: UpperCAmelCase_ , UpperCAmelCase_ =stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(lowercase__ ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
54
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class A_ (a_ ): """simple docstring""" a__ = '''roc_bert''' def __init__( self :Dict , lowerCAmelCase__ :Optional[Any]=30_522 , lowerCAmelCase__ :Dict=768 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=3_072 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :Optional[Any]="absolute" , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=768 , lowerCAmelCase__ :Optional[Any]=910 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :int=24_858 , lowerCAmelCase__ :List[Any]=True , **lowerCAmelCase__ :int , ) -> List[str]: '''simple docstring''' snake_case_ : int = vocab_size snake_case_ : Dict = max_position_embeddings snake_case_ : int = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : int = intermediate_size snake_case_ : Optional[Any] = hidden_act snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : List[Any] = attention_probs_dropout_prob snake_case_ : Dict = initializer_range snake_case_ : str = type_vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : Optional[Any] = use_cache snake_case_ : Optional[Any] = enable_pronunciation snake_case_ : List[Any] = enable_shape snake_case_ : Optional[int] = pronunciation_embed_dim snake_case_ : Dict = pronunciation_vocab_size snake_case_ : int = shape_embed_dim 
snake_case_ : Any = shape_vocab_size snake_case_ : Optional[int] = concat_input snake_case_ : List[Any] = position_embedding_type snake_case_ : Any = classifier_dropout super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
653
0
import pytest

import datasets


# Import fixture modules as plugins so their fixtures are visible to all tests.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark every test that carries no explicit marker as a unit test."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register custom markers so pytest does not warn about them."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache location into a per-session temp dir.

    Keeps the test suite from touching (or polluting) the user's real
    ``~/.cache/huggingface`` tree.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence progress bars for the whole session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't phone home with download counts during tests.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Opt-in fixture: silence SQLAlchemy 2.0 migration warnings.
    # NOTE(review): the obfuscated original passed an opaque value here;
    # True matches the upstream datasets conftest — confirm against history.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
55
"""Largest all-ones square inside a binary matrix.

Four equivalent implementations of the classic DP problem: plain recursion,
memoised recursion, bottom-up tabulation, and bottom-up with O(cols) memory.
Each returns the side length of the largest square of 1s.
"""


def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (exponential time); largest result tracked in a closure cell.

    >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # Base case: cells outside the matrix contribute a square of size 0.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square anchored here extends by the weakest of the 3 neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # One-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoised recursion: O(rows * cols) time via a -1-initialised dp table.

    >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:  # already solved this sub-problem
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up tabulation with a (rows+1) x (cols+1) zero-padded table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows (O(cols) extra memory).

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # NOTE(review): this aliases next_row to current_row (no copy), exactly
        # as in the original; the final maximum is preserved, but intermediate
        # diagonal reads see same-row values — confirm against the tabulated
        # variant if the table itself is ever needed.
        next_row = current_row
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
653
0
"""Tests for the datasets extraction helpers (Extractor and per-format extractors)."""

import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """Each concrete extractor round-trips its own format back to the text fixture."""
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Fixture is None when the optional compression library is missing.
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        # Archives extract to a directory containing the single text file.
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """The generic Extractor infers the format and extracts like the base extractor."""
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """A tar archive whose member path escapes the target dir via `..`."""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """A tar archive containing a symlink pointing outside the archive root."""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        # str required by os.readlink on Windows and Python < 3.8
        f.add(str(directory / "subdir"), arcname="subdir")
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """Insecure members must be refused with an ERROR log, not extracted."""
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
56
"""Fetch artifacts/reports from the most recent completed daily (scheduled) CI run."""

import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the latest `num_runs` scheduled workflow runs on `main` (newest first)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the run id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) is the keyword expected by
        # get_artifacts_links — confirm against that helper before "fixing".
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download then unzip artifacts; return {artifact_name: {member_name: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
653
0
# Lazy-import entry point for the Graphormer model family.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Names are loaded lazily on first attribute access via _LazyModule.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # real import (the previous version bound the proxy to a throwaway name).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
"""A polyalphabetic substitution cipher over A-Z; spaces pass through unchanged.

Encryption subtracts the key letter's index (mod 26); decryption adds it back.
"""

from string import ascii_uppercase

# Letter -> alphabet index, and index -> letter.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Stretch `key` to the length of `message` by cycling its characters."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` (uppercase A-Z and spaces) with the stretched key."""
    encrypted = ""
    i = 0  # key index advances only on letters, not on spaces
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dictb[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt text produced by `cipher_text` using the same stretched key."""
    decrypted = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            decrypted += " "
        else:
            # + 26 keeps the sum positive before the modulo (defensive; mod 26
            # alone would already normalise in Python).
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            decrypted += dictb[x]
    return decrypted


def main() -> None:
    """Demo: encrypt and decrypt a sample message."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
653
0
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ): '''simple docstring''' if openai_config_file == "": snake_case_ : List[Any] = OpenAIGPTConfig() else: snake_case_ : Optional[int] = OpenAIGPTConfig.from_json_file(__UpperCamelCase ) snake_case_ : List[Any] = OpenAIGPTModel(__UpperCamelCase ) # Load weights from numpy load_tf_weights_in_openai_gpt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model snake_case_ : List[Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME snake_case_ : Tuple = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , __UpperCamelCase ) print(F'Save configuration file to {pytorch_config_dump_path}' ) with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--openai_checkpoint_folder_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--openai_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
58
"""Convert a Hugging Face BERT PyTorch checkpoint to the original TF 1.x format."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Write `model`'s weights as a TF 1.x checkpoint under `ckpt_dir`.

    Args:
        model: BertModel instance whose state_dict is converted.
        ckpt_dir: Output directory for the TF checkpoint.
        model_name: Used (with '-' replaced by '_') as the checkpoint file stem.
    """
    # Linear/attention weights are stored transposed relative to TF's layout.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring -> tf substring) rewrites applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Map a pytorch parameter name onto the original TF variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialised TF variable matching the tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load the PyTorch checkpoint, then convert it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
653
0
"""Tests for datasets' cached_path / offline-mode file utilities."""

import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


# NOTE(review): the obfuscated original collapsed this multiline literal;
# reconstructed to match the "Text data. / Second line of data." fixture text.
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """A zstd-compressed copy of FILE_CONTENT on disk."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """FILE_CONTENT written into the mock fsspec filesystem; returns its path."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file recovers the original text."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction dir honours the default layout or the monkeypatched overrides."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    """Local files resolve to themselves, via absolute or relative paths."""
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    """Missing local files raise (absolute and relative forms)."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache can fetch through an fsspec URL."""
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
59
"""Hash table variant whose buckets are deques (separate chaining)."""

from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """HashTable subclass storing each slot's values in a deque."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the bucket, then push the new value at the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining capacity per slot, scaled by the charge factor.
        # NOTE(review): slots still None would raise on len(); assumes every
        # slot holds a deque by the time this is called — confirm with HashTable.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Keep chaining into this bucket until it is full AND no empty slot
        # remains; only then fall back to the parent's resolution strategy.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
653
0